X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fzfs_vfsops.c;h=9ae7ab500942b21688693ea88a52eb6e8d2e150f;hb=refs%2Fheads%2Frertzinger%2Ffeature-zpool-get--p;hp=ed847f1304e51f2a6427ffad1bd75c8fb31058e7;hpb=591fb62f19ee2431983a4cbeb0d200b1b8e7daf5;p=zfs.git

diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index ed847f1..9ae7ab5 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -49,6 +49,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -56,13 +57,13 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include "zfs_comutil.h"
@@ -140,10 +141,16 @@ xattr_changed_cb(void *arg, uint64_t newval)
 {
 	zfs_sb_t *zsb = arg;
 
-	if (newval == TRUE)
-		zsb->z_flags |= ZSB_XATTR;
-	else
+	if (newval == ZFS_XATTR_OFF) {
 		zsb->z_flags &= ~ZSB_XATTR;
+	} else {
+		zsb->z_flags |= ZSB_XATTR;
+
+		if (newval == ZFS_XATTR_SA)
+			zsb->z_xattr_sa = B_TRUE;
+		else
+			zsb->z_xattr_sa = B_FALSE;
+	}
 }
 
 static void
@@ -226,10 +233,11 @@ zfs_register_callbacks(zfs_sb_t *zsb)
 {
 	struct dsl_dataset *ds = NULL;
 	objset_t *os = zsb->z_os;
+	boolean_t do_readonly = B_FALSE;
 	int error = 0;
 
 	if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os)))
-		readonly_changed_cb(zsb, B_TRUE);
+		do_readonly = B_TRUE;
 
 	/*
 	 * Register property callbacks.
@@ -264,6 +272,9 @@ zfs_register_callbacks(zfs_sb_t *zsb)
 	if (error)
 		goto unregister;
 
+	if (do_readonly)
+		readonly_changed_cb(zsb, B_TRUE);
+
 	return (0);
 
 unregister:
@@ -293,7 +304,6 @@ static int
 zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
     uint64_t *userp, uint64_t *groupp)
 {
-	znode_phys_t *znp = data;
 	int error = 0;
 
 	/*
@@ -312,20 +322,18 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
 		return (EEXIST);
 
 	if (bonustype == DMU_OT_ZNODE) {
+		znode_phys_t *znp = data;
 		*userp = znp->zp_uid;
 		*groupp = znp->zp_gid;
 	} else {
 		int hdrsize;
+		sa_hdr_phys_t *sap = data;
+		sa_hdr_phys_t sa = *sap;
+		boolean_t swap = B_FALSE;
 
 		ASSERT(bonustype == DMU_OT_SA);
-		hdrsize = sa_hdrsize(data);
 
-		if (hdrsize != 0) {
-			*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
-			    SA_UID_OFFSET));
-			*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
-			    SA_GID_OFFSET));
-		} else {
+		if (sa.sa_magic == 0) {
 			/*
 			 * This should only happen for newly created
 			 * files that haven't had the znode data filled
@@ -333,6 +341,25 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
 			 */
 			*userp = 0;
 			*groupp = 0;
+			return (0);
+		}
+		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
+			sa.sa_magic = SA_MAGIC;
+			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
+			swap = B_TRUE;
+		} else {
+			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
+		}
+
+		hdrsize = sa_hdrsize(&sa);
+		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
+		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
+		    SA_UID_OFFSET));
+		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
+		    SA_GID_OFFSET));
+		if (swap) {
+			*userp = BSWAP_64(*userp);
+			*groupp = BSWAP_64(*groupp);
 		}
 	}
 	return (error);
@@ -577,7 +604,7 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	int i, error;
 	uint64_t sa_obj;
 
-	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
+	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);
 
 	/*
	 * We claim to always be readonly so we can open snapshots;
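A quick illustration of the byte-order handling the new zfs_space_delta_cb() code above relies on: it probes sa_magic, and if the stored value matches the byte-swapped magic, the bonus buffer was written by an opposite-endian host, so the uid/gid it extracts must be swapped too. The standalone C sketch below demonstrates the same magic-probe idiom; toy_hdr_t, TOY_MAGIC, and bswap32() are invented stand-ins, not ZFS code.

```c
#include <stdint.h>
#include <stdio.h>

#define	TOY_MAGIC	0x2F505A66u

typedef struct {
	uint32_t magic;		/* written in the creator's byte order */
	uint32_t payload;
} toy_hdr_t;

static uint32_t
bswap32(uint32_t x)
{
	return ((x >> 24) | ((x >> 8) & 0xff00) |
	    ((x << 8) & 0xff0000) | (x << 24));
}

static int
read_payload(const toy_hdr_t *hdr, uint32_t *out)
{
	int swap = 0;

	if (hdr->magic == bswap32(TOY_MAGIC))
		swap = 1;		/* written by an opposite-endian host */
	else if (hdr->magic != TOY_MAGIC)
		return (-1);		/* corrupt header */

	*out = swap ? bswap32(hdr->payload) : hdr->payload;
	return (0);
}

int
main(void)
{
	toy_hdr_t native = { TOY_MAGIC, 42 };
	toy_hdr_t foreign = { bswap32(TOY_MAGIC), bswap32(42) };
	uint32_t v;

	if (read_payload(&native, &v) == 0)
		printf("native:  %u\n", v);
	if (read_payload(&foreign, &v) == 0)
		printf("foreign: %u\n", v);
	return (0);
}
```

Both calls print 42: the header itself tells the reader which byte order was used, so no out-of-band flag is needed.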
@@ -600,12 +627,6 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
 	zsb->z_os = os;
 
-	error = -bdi_init(&zsb->z_bdi);
-	if (error) {
-		kmem_free(zsb, sizeof (zfs_sb_t));
-		return (error);
-	}
-
 	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
 	if (error) {
 		goto out;
@@ -647,6 +668,10 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 		    &sa_obj);
 		if (error)
 			goto out;
+
+		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
+		if ((error == 0) && (zval == ZFS_XATTR_SA))
+			zsb->z_xattr_sa = B_TRUE;
 	} else {
 		/*
 		 * Pre SA versions file systems should never touch
@@ -706,6 +731,10 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
 
+	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
+	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
+	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);
+
 	*zsbp = zsb;
 	return (0);
 
@@ -807,7 +836,6 @@ zfs_sb_free(zfs_sb_t *zsb)
 
 	zfs_fuid_destroy(zsb);
 
-	bdi_destroy(&zsb->z_bdi);
 	mutex_destroy(&zsb->z_znodes_lock);
 	mutex_destroy(&zsb->z_lock);
 	list_destroy(&zsb->z_all_znodes);
@@ -816,6 +844,8 @@ zfs_sb_free(zfs_sb_t *zsb)
 	rw_destroy(&zsb->z_fuid_lock);
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_destroy(&zsb->z_hold_mtx[i]);
+	mutex_destroy(&zsb->z_ctldir_lock);
+	avl_destroy(&zsb->z_ctldir_snaps);
 	kmem_free(zsb, sizeof (zfs_sb_t));
 }
 EXPORT_SYMBOL(zfs_sb_free);
@@ -910,6 +940,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 {
 	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
 	uint64_t refdbytes, availbytes, usedobjs, availobjs;
+	uint64_t fsid;
 	uint32_t bshift;
 
 	ZFS_ENTER(zsb);
@@ -917,6 +948,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 	dmu_objset_space(zsb->z_os, &refdbytes, &availbytes,
 	    &usedobjs, &availobjs);
 
+	fsid = dmu_objset_fsid_guid(zsb->z_os);
 	/*
 	 * The underlying storage pool actually uses multiple block
 	 * size.  Under Solaris frsize (fragment size) is reported as
@@ -950,8 +982,8 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 	 */
 	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
 	statp->f_files = statp->f_ffree + usedobjs;
-	statp->f_fsid.val[0] = dentry->d_sb->s_dev;
-	statp->f_fsid.val[1] = 0;
+	statp->f_fsid.val[0] = (uint32_t)fsid;
+	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
 	statp->f_type = ZFS_SUPER_MAGIC;
 	statp->f_namelen = ZFS_MAXNAMELEN;
 
@@ -983,8 +1015,28 @@
 }
 EXPORT_SYMBOL(zfs_root);
 
+#ifdef HAVE_SHRINK
+int
+zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
+	struct shrinker *shrinker = &sb->s_shrink;
+	struct shrink_control sc = {
+		.nr_to_scan = nr_to_scan,
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	ZFS_ENTER(zsb);
+	*objects = (*shrinker->shrink)(shrinker, &sc);
+	ZFS_EXIT(zsb);
+
+	return (0);
+}
+EXPORT_SYMBOL(zfs_sb_prune);
+#endif /* HAVE_SHRINK */
+
 /*
- * Teardown the zfs_sb_t::z_os.
+ * Teardown the zfs_sb_t.
  *
  * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
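The zfs_statvfs() hunk above stops reporting the kernel-assigned s_dev as f_fsid and instead packs the 64-bit objset GUID into the two 32-bit f_fsid words, presumably so the reported fsid stays tied to the dataset rather than to a transient device number. A minimal sketch of that packing and its lossless round trip, using an invented example GUID:

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t fsid_guid = 0x1122334455667788ULL;	/* example value */
	uint32_t val[2];

	val[0] = (uint32_t)fsid_guid;			/* low 32 bits */
	val[1] = (uint32_t)(fsid_guid >> 32);		/* high 32 bits */

	/* The original 64-bit GUID can be reassembled losslessly. */
	uint64_t rebuilt = ((uint64_t)val[1] << 32) | val[0];

	printf("%016llx -> %08x:%08x -> %016llx\n",
	    (unsigned long long)fsid_guid, val[0], val[1],
	    (unsigned long long)rebuilt);
	return (0);
}
```

The split is forced by the kstatfs ABI: f_fsid is defined as two 32-bit words, so a 64-bit identifier has to be carried in halves.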
@@ -1005,14 +1057,15 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 	 * for non-snapshots.
 	 */
 	shrink_dcache_sb(zsb->z_parent->z_sb);
-	(void) spl_invalidate_inodes(zsb->z_parent->z_sb, 0);
 	}
 
 	/*
-	 * Drain the iput_taskq to ensure all active references to the
+	 * If someone has not already unmounted this file system,
+	 * drain the iput_taskq to ensure all active references to the
 	 * zfs_sb_t have been handled; only then can it be safely destroyed.
 	 */
-	taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));
+	if (zsb->z_os)
+		taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));
 
 	/*
 	 * Close the zil.  NB: Can't close the zil while zfs_inactive
@@ -1037,25 +1090,26 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 	}
 
 	/*
-	 * At this point there are no vops active, and any new vops will
-	 * fail with EIO since we have z_teardown_lock for writer (only
-	 * relavent for forced unmount).
+	 * At this point there are no VFS ops active, and any new VFS ops
+	 * will fail with EIO since we have z_teardown_lock for writer (only
+	 * relevant for forced unmount).
 	 *
 	 * Release all holds on dbufs.
 	 */
 	mutex_enter(&zsb->z_znodes_lock);
 	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
-	    zp = list_next(&zsb->z_all_znodes, zp))
+	    zp = list_next(&zsb->z_all_znodes, zp)) {
 		if (zp->z_sa_hdl) {
 			ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
 			zfs_znode_dmu_fini(zp);
 		}
+	}
 	mutex_exit(&zsb->z_znodes_lock);
 
 	/*
-	 * If we are unmounting, set the unmounted flag and let new vops
+	 * If we are unmounting, set the unmounted flag and let new VFS ops
 	 * unblock.  zfs_inactive will have the unmounted behavior, and all
-	 * other vops will fail with EIO.
+	 * other VFS ops will fail with EIO.
 	 */
 	if (unmounting) {
 		zsb->z_unmounted = B_TRUE;
@@ -1080,18 +1134,18 @@
 	/*
 	 * Evict cached data
 	 */
-	if (dmu_objset_is_dirty_anywhere(zsb->z_os))
-		if (!zfs_is_readonly(zsb))
-			txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
+	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
+	    !zfs_is_readonly(zsb))
+		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
 	(void) dmu_objset_evict_dbufs(zsb->z_os);
 
 	return (0);
 }
 EXPORT_SYMBOL(zfs_sb_teardown);
 
-#ifdef HAVE_BDI
-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
-#endif /* HAVE_BDI */
+#if defined(HAVE_BDI) && !defined(HAVE_BDI_SETUP_AND_REGISTER)
+atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
+#endif /* HAVE_BDI && !HAVE_BDI_SETUP_AND_REGISTER */
 
 int
 zfs_domount(struct super_block *sb, void *data, int silent)
@@ -1118,12 +1172,31 @@ zfs_domount(struct super_block *sb, void *data, int silent)
 	sb->s_time_gran = 1;
 	sb->s_blocksize = recordsize;
 	sb->s_blocksize_bits = ilog2(recordsize);
-	bdi_put_sb(sb, NULL);
+
+#ifdef HAVE_BDI
+	/*
+	 * 2.6.32 API change,
+	 * Added backing_device_info (BDI) per super block interfaces.  A BDI
+	 * must be configured when using a non-device backed filesystem for
+	 * proper writeback.  This is not required for older pdflush kernels.
+	 *
+	 * NOTE: Linux read-ahead is disabled in favor of zfs read-ahead.
+	 */
+	zsb->z_bdi.ra_pages = 0;
+	sb->s_bdi = &zsb->z_bdi;
+
+	error = -bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
+	if (error)
+		goto out;
+#endif /* HAVE_BDI */
 
 	/* Set callback operations for the file system. */
 	sb->s_op = &zpl_super_operations;
 	sb->s_xattr = zpl_xattr_handlers;
 	sb->s_export_op = &zpl_export_operations;
+#ifdef HAVE_S_D_OP
+	sb->s_d_op = &zpl_dentry_operations;
+#endif /* HAVE_S_D_OP */
 
 	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);
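zfs_domount() above keeps the invariant s_blocksize == 1 << s_blocksize_bits by deriving the bit count from the dataset recordsize with ilog2(). A standalone sketch of that relationship for power-of-two sizes; ilog2_u32() is a local stand-in for the kernel helper, not the kernel's implementation:

```c
#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)) for v > 0; mirrors what the kernel's ilog2() computes. */
static unsigned
ilog2_u32(uint32_t v)
{
	unsigned bits = 0;

	while (v >>= 1)
		bits++;
	return (bits);
}

int
main(void)
{
	uint32_t recordsize = 131072;	/* the common 128K ZFS recordsize */
	unsigned bits = ilog2_u32(recordsize);

	printf("recordsize=%u blocksize_bits=%u check=%u\n",
	    recordsize, bits, 1u << bits);
	return (0);
}
```

Because recordsize is constrained to a power of two, the floor operation is exact and the shift reconstructs the original block size.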
@@ -1143,20 +1216,7 @@
 		dmu_objset_set_user(zsb->z_os, zsb);
 		mutex_exit(&zsb->z_os->os_user_ptr_lock);
 	} else {
-		/* Disable Linux read-ahead handled by lower layers */
-		zsb->z_bdi.ra_pages = 0;
-
-		error = -bdi_register(&zsb->z_bdi, NULL, "zfs-%d",
-		    atomic_long_inc_return(&bdi_seq));
-		if (error)
-			goto out;
-
-		bdi_put_sb(sb, &zsb->z_bdi);
-
 		error = zfs_sb_setup(zsb, B_TRUE);
-#ifdef HAVE_SNAPSHOT
-		(void) zfs_snap_create(zsb);
-#endif /* HAVE_SNAPSHOT */
 	}
 
 	/* Allocate a root inode for the filesystem. */
@@ -1167,12 +1227,15 @@
 	}
 
 	/* Allocate a root dentry for the filesystem */
-	sb->s_root = d_alloc_root(root_inode);
+	sb->s_root = d_make_root(root_inode);
 	if (sb->s_root == NULL) {
 		(void) zfs_umount(sb);
 		error = ENOMEM;
 		goto out;
 	}
+
+	if (!zsb->z_issnap)
+		zfsctl_create(zsb);
 out:
 	if (error) {
 		dmu_objset_disown(zsb->z_os, zsb);
@@ -1183,6 +1246,27 @@ out:
 }
 EXPORT_SYMBOL(zfs_domount);
 
+/*
+ * Called when an unmount is requested and certain sanity checks have
+ * already passed.  At this point no dentries or inodes have been reclaimed
+ * from their respective caches.  We drop the extra reference on the .zfs
+ * control directory to allow everything to be reclaimed.  All snapshots
+ * must already have been unmounted to reach this point.
+ */
+void
+zfs_preumount(struct super_block *sb)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
+
+	if (zsb != NULL && zsb->z_ctldir != NULL)
+		zfsctl_destroy(zsb);
+}
+EXPORT_SYMBOL(zfs_preumount);
+
+/*
+ * Called once all other unmount-related teardown has occurred.
+ * It is our responsibility to release any remaining infrastructure.
+ */
 /*ARGSUSED*/
 int
 zfs_umount(struct super_block *sb)
@@ -1193,10 +1277,9 @@ zfs_umount(struct super_block *sb)
 	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
 	os = zsb->z_os;
 
-	if (bdi_get_sb(sb)) {
-		bdi_unregister(bdi_get_sb(sb));
-		bdi_put_sb(sb, NULL);
-	}
+#ifdef HAVE_BDI
+	bdi_destroy(sb->s_bdi);
+#endif /* HAVE_BDI */
 
 	/*
 	 * z_os will be NULL if there was an error in
@@ -1260,11 +1343,10 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 
 		ZFS_EXIT(zsb);
 
-#ifdef HAVE_SNAPSHOT
-		err = zfsctl_lookup_objset(vfsp, objsetid, &zsb);
+		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
 		if (err)
 			return (EINVAL);
-#endif /* HAVE_SNAPSHOT */
+
 		ZFS_ENTER(zsb);
 	}
 
@@ -1281,22 +1363,20 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 		return (EINVAL);
 	}
 
-#ifdef HAVE_SNAPSHOT
 	/* A zero fid_gen means we are in the .zfs control directories */
 	if (fid_gen == 0 &&
 	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
 		*ipp = zsb->z_ctldir;
 		ASSERT(*ipp != NULL);
 		if (object == ZFSCTL_INO_SNAPDIR) {
-			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, NULL,
-			    0, NULL, NULL, NULL, NULL, NULL) == 0);
+			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
+			    0, kcred, NULL, NULL) == 0);
 		} else {
 			igrab(*ipp);
 		}
 		ZFS_EXIT(zsb);
 		return (0);
 	}
-#endif /* HAVE_SNAPSHOT */
 
 	gen_mask = -1ULL >> (64 - 8 * i);
 
@@ -1327,7 +1407,7 @@
 EXPORT_SYMBOL(zfs_vget);
 
 /*
- * Block out VOPs and close zfs_sb_t::z_os
+ * Block out VFS ops and close zfs_sb_t
  *
  * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.
@@ -1339,6 +1419,7 @@ zfs_suspend_fs(zfs_sb_t *zsb)
 	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
 		return (error);
+
 	dmu_objset_disown(zsb->z_os, zsb);
 
 	return (0);
 }
@@ -1346,7 +1427,7 @@
 EXPORT_SYMBOL(zfs_suspend_fs);
 
 /*
- * Reopen zfs_sb_t::z_os and release VOPs.
+ * Reopen zfs_sb_t and release VFS ops.
  */
 int
 zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
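One subtle context line retained in the zfs_vget() hunks above is gen_mask = -1ULL >> (64 - 8 * i);: it builds an all-ones mask covering the low 8*i bits, sized to a generation number stored in i bytes of a variable-length NFS file handle. A small self-contained demonstration of the idiom (not ZFS code):

```c
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * For i bytes of stored data, mask the low 8*i bits.  The formula
	 * never shifts a 64-bit value by 64 as long as i >= 1, so it stays
	 * clear of undefined behavior even in the i == 8 (full-width) case.
	 */
	for (unsigned i = 1; i <= 8; i++) {
		uint64_t gen_mask = -1ULL >> (64 - 8 * i);

		printf("i=%u mask=%016llx\n", i,
		    (unsigned long long)gen_mask);
	}
	return (0);
}
```

Masking with gen_mask lets the comparison against the inode's generation ignore the high-order bytes that were never encoded in the handle.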
@@ -1375,33 +1456,41 @@
 		goto bail;
 
 	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
+	zsb->z_rollback_time = jiffies;
 
 	/*
-	 * Attempt to re-establish all the active znodes with
-	 * their dbufs.  If a zfs_rezget() fails, then we'll let
-	 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
-	 * when they try to use their znode.
+	 * Attempt to re-establish all the active inodes with their
+	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
+	 * and mark it stale.  This prevents a collision if a new
+	 * inode/object is created which must use the same inode
+	 * number.  The stale inode will be released when the
+	 * VFS prunes the dentry holding the remaining references
+	 * on the stale inode.
 	 */
 	mutex_enter(&zsb->z_znodes_lock);
 	for (zp = list_head(&zsb->z_all_znodes); zp;
 	    zp = list_next(&zsb->z_all_znodes, zp)) {
-		(void) zfs_rezget(zp);
+		err2 = zfs_rezget(zp);
+		if (err2) {
+			remove_inode_hash(ZTOI(zp));
+			zp->z_is_stale = B_TRUE;
+		}
 	}
 	mutex_exit(&zsb->z_znodes_lock);
-
 }
 
 bail:
-	/* release the VOPs */
+	/* release the VFS ops */
 	rw_exit(&zsb->z_teardown_inactive_lock);
 	rrw_exit(&zsb->z_teardown_lock, FTAG);
 
 	if (err) {
 		/*
-		 * Since we couldn't reopen zfs_sb_t::z_os, force
-		 * unmount this file system.
+		 * Since we couldn't reopen zfs_sb_t or set up the
+		 * SA framework, force unmount this file system.
 		 */
-		(void) zfs_umount(zsb->z_sb);
+		if (zsb->z_os)
+			(void) zfs_umount(zsb->z_sb);
 	}
 	return (err);
 }
@@ -1455,7 +1544,7 @@ zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
 		error = zap_add(os, MASTER_NODE_OBJ,
 		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
-		ASSERT3U(error, ==, 0);
+		ASSERT0(error);
 
 		VERIFY(0 == sa_set_sa_object(os, sa_obj));
 		sa_register_update_callback(os, zfs_sa_upgrade);
 
@@ -1522,9 +1611,11 @@ EXPORT_SYMBOL(zfs_get_zplprop);
 void
 zfs_init(void)
 {
+	zfsctl_init();
 	zfs_znode_init();
 	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 	register_filesystem(&zpl_fs_type);
+	(void) arc_add_prune_callback(zpl_prune_sbs, NULL);
 }
 
 void
@@ -1532,4 +1623,5 @@ zfs_fini(void)
 {
 	unregister_filesystem(&zpl_fs_type);
 	zfs_znode_fini();
+	zfsctl_fini();
 }
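Note the symmetry the final hunks establish: zfs_init() brings the .zfs control-directory support up first and zfs_fini() tears it down last, so teardown runs in exact reverse (LIFO) order of initialization. A toy C sketch of that pairing; the subsystem names below are invented, not the ZFS functions:

```c
#include <stdio.h>

static void ctldir_init(void)    { printf("ctldir up\n"); }
static void ctldir_fini(void)    { printf("ctldir down\n"); }
static void znode_init(void)     { printf("znode cache up\n"); }
static void znode_fini(void)     { printf("znode cache down\n"); }
static void fstype_register(void)   { printf("fs type registered\n"); }
static void fstype_unregister(void) { printf("fs type unregistered\n"); }

static void
subsys_init(void)
{
	ctldir_init();		/* lowest-level dependency first */
	znode_init();
	fstype_register();	/* last: makes everything reachable */
}

static void
subsys_fini(void)
{
	fstype_unregister();	/* first: cut off new entry points */
	znode_fini();
	ctldir_fini();		/* exact reverse of init order */
}

int
main(void)
{
	subsys_init();
	subsys_fini();
	return (0);
}
```

Unregistering the filesystem type first guarantees no new mounts can race against the destruction of the caches they would depend on.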