diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index bb2fdb0..602c332 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -56,21 +57,23 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include "zfs_comutil.h"
 
 /*ARGSUSED*/
 int
-zfs_sync(zfs_sb_t *zsb, short flag, cred_t *cr)
+zfs_sync(struct super_block *sb, int wait, cred_t *cr)
 {
+	zfs_sb_t *zsb = sb->s_fs_info;
+
 	/*
 	 * Data integrity is job one.  We don't want a compromised kernel
 	 * writing to the storage pool, so we never sync during panic.
@@ -78,6 +81,13 @@ zfs_sync(zfs_sb_t *zsb, short flag, cred_t *cr)
 	if (unlikely(oops_in_progress))
 		return (0);
 
+	/*
+	 * Semantically, the only requirement is that the sync be initiated.
+	 * The DMU syncs out txgs frequently, so there's nothing to do.
+	 */
+	if (!wait)
+		return (0);
+
 	if (zsb != NULL) {
 		/*
 		 * Sync a specific filesystem.
@@ -87,19 +97,14 @@ zfs_sync(zfs_sb_t *zsb, short flag, cred_t *cr)
 		ZFS_ENTER(zsb);
 		dp = dmu_objset_pool(zsb->z_os);
 
-#ifdef HAVE_SHUTDOWN
 		/*
 		 * If the system is shutting down, then skip any
 		 * filesystems which may exist on a suspended pool.
-		 *
-		 * XXX: This can be implemented using the Linux reboot
-		 * notifiers: {un}register_reboot_notifier().
 		 */
-		if (sys_shutdown && spa_suspended(dp->dp_spa)) {
+		if (spa_suspended(dp->dp_spa)) {
 			ZFS_EXIT(zsb);
 			return (0);
 		}
-#endif /* HAVE_SHUTDOWN */
 
 		if (zsb->z_log != NULL)
 			zil_commit(zsb->z_log, 0);
@@ -118,22 +123,17 @@ zfs_sync(zfs_sb_t *zsb, short flag, cred_t *cr)
 }
 EXPORT_SYMBOL(zfs_sync);
 
+boolean_t
+zfs_is_readonly(zfs_sb_t *zsb)
+{
+	return (!!(zsb->z_sb->s_flags & MS_RDONLY));
+}
+EXPORT_SYMBOL(zfs_is_readonly);
+
 static void
 atime_changed_cb(void *arg, uint64_t newval)
 {
-	zfs_sb_t *zsb = arg;
-	struct super_block *sb = zsb->z_sb;
-	struct vfsmount *vfs = zsb->z_vfs;
-
-	if (newval == TRUE) {
-		vfs->mnt_flags &= ~MNT_NOATIME;
-		sb->s_flags &= ~MS_NOATIME;
-		zsb->z_atime = TRUE;
-	} else {
-		vfs->mnt_flags |= MNT_NOATIME;
-		sb->s_flags |= MS_NOATIME;
-		zsb->z_atime = FALSE;
-	}
+	((zfs_sb_t *)arg)->z_atime = newval;
 }
 
 static void
@@ -141,10 +141,15 @@
 xattr_changed_cb(void *arg, uint64_t newval)
 {
 	zfs_sb_t *zsb = arg;
 
-	if (newval == TRUE) {
-		zsb->z_flags |= ZSB_XATTR_USER;
+	if (newval == ZFS_XATTR_OFF) {
+		zsb->z_flags &= ~ZSB_XATTR;
 	} else {
-		zsb->z_flags &= ~ZSB_XATTR_USER;
+		zsb->z_flags |= ZSB_XATTR;
+
+		if (newval == ZFS_XATTR_SA)
+			zsb->z_xattr_sa = B_TRUE;
+		else
+			zsb->z_xattr_sa = B_FALSE;
 	}
 }
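The reworked zfs_sync() above takes the super block and the VFS wait flag directly. A minimal sketch of the glue expected to call it, modeled on the zpl_super.c pattern used elsewhere in this tree (the exact names are assumptions, not part of this patch):

	static int
	zpl_sync_fs(struct super_block *sb, int wait)
	{
		cred_t *cr = CRED();
		int error;

		crhold(cr);
		error = -zfs_sync(sb, wait, cr);	/* returns 0 when !wait */
		crfree(cr);
		ASSERT3S(error, <=, 0);

		return (error);
	}

With this wiring, sync(2) and syncfs(2) reach zfs_sync() through super_operations->sync_fs, and the !wait short circuit above simply acknowledges that the DMU already syncs txgs on its own cadence.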
@@ -165,84 +170,44 @@
 readonly_changed_cb(void *arg, uint64_t newval)
 {
 	zfs_sb_t *zsb = arg;
 	struct super_block *sb = zsb->z_sb;
-	struct vfsmount *vfs = zsb->z_vfs;
 
-	if (newval) {
-		vfs->mnt_flags |= MNT_READONLY;
+	if (sb == NULL)
+		return;
+
+	if (newval)
 		sb->s_flags |= MS_RDONLY;
-	} else {
-		vfs->mnt_flags &= ~MNT_READONLY;
+	else
 		sb->s_flags &= ~MS_RDONLY;
-	}
 }
 
 static void
 devices_changed_cb(void *arg, uint64_t newval)
 {
-	zfs_sb_t *zsb = arg;
-	struct super_block *sb = zsb->z_sb;
-	struct vfsmount *vfs = zsb->z_vfs;
-
-	if (newval == FALSE) {
-		vfs->mnt_flags |= MNT_NODEV;
-		sb->s_flags |= MS_NODEV;
-	} else {
-		vfs->mnt_flags &= ~MNT_NODEV;
-		sb->s_flags &= ~MS_NODEV;
-	}
 }
 
 static void
 setuid_changed_cb(void *arg, uint64_t newval)
 {
-	zfs_sb_t *zsb = arg;
-	struct super_block *sb = zsb->z_sb;
-	struct vfsmount *vfs = zsb->z_vfs;
-
-	if (newval == FALSE) {
-		vfs->mnt_flags |= MNT_NOSUID;
-		sb->s_flags |= MS_NOSUID;
-	} else {
-		vfs->mnt_flags &= ~MNT_NOSUID;
-		sb->s_flags &= ~MS_NOSUID;
-	}
 }
 
 static void
 exec_changed_cb(void *arg, uint64_t newval)
 {
-	zfs_sb_t *zsb = arg;
-	struct super_block *sb = zsb->z_sb;
-	struct vfsmount *vfs = zsb->z_vfs;
-
-	if (newval == FALSE) {
-		vfs->mnt_flags |= MNT_NOEXEC;
-		sb->s_flags |= MS_NOEXEC;
-	} else {
-		vfs->mnt_flags &= ~MNT_NOEXEC;
-		sb->s_flags &= ~MS_NOEXEC;
-	}
 }
 
-/*
- * The nbmand mount option can be changed at mount time.
- * We can't allow it to be toggled on live file systems or incorrect
- * behavior may be seen from cifs clients
- *
- * This property isn't registered via dsl_prop_register(), but this callback
- * will be called when a file system is first mounted
- */
 static void
 nbmand_changed_cb(void *arg, uint64_t newval)
 {
 	zfs_sb_t *zsb = arg;
 	struct super_block *sb = zsb->z_sb;
 
-	if (newval == TRUE) {
+	if (sb == NULL)
+		return;
+
+	if (newval == TRUE)
 		sb->s_flags |= MS_MANDLOCK;
-	} else {
+	else
 		sb->s_flags &= ~MS_MANDLOCK;
-	}
 }
 
 static void
@@ -266,58 +231,12 @@ acl_inherit_changed_cb(void *arg, uint64_t newval)
 int
 zfs_register_callbacks(zfs_sb_t *zsb)
 {
-	struct vfsmount *vfsp = zsb->z_vfs;
 	struct dsl_dataset *ds = NULL;
 	objset_t *os = zsb->z_os;
-	uint64_t nbmand;
-	boolean_t readonly = B_FALSE;
-	boolean_t setuid = B_TRUE;
-	boolean_t exec = B_TRUE;
-	boolean_t devices = B_TRUE;
-	boolean_t xattr = B_TRUE;
-	boolean_t atime = B_TRUE;
-	char osname[MAXNAMELEN];
 	int error = 0;
 
-	/*
-	 * While Linux allows multiple vfs mounts per super block we have
-	 * limited it artificially to one in zfs_fill_super.  Thus it is
-	 * safe for us to modify the vfs mount fails through the callbacks.
-	 */
-	if ((vfsp->mnt_flags & MNT_READONLY) ||
-	    !spa_writeable(dmu_objset_spa(os)))
-		readonly = B_TRUE;
-
-	if (vfsp->mnt_flags & MNT_NOSUID) {
-		devices = B_FALSE;
-		setuid = B_FALSE;
-	} else {
-		if (vfsp->mnt_flags & MNT_NODEV)
-			devices = B_FALSE;
-	}
-
-	if (vfsp->mnt_flags & MNT_NOEXEC)
-		exec = B_FALSE;
-
-	if (vfsp->mnt_flags & MNT_NOATIME)
-		atime = B_FALSE;
-
-	/*
-	 * nbmand is a special property which may only be changed at
-	 * mount time.  Unfortunately, Linux does not have a VFS mount
-	 * flag instead this is a super block flag.  So setting this
-	 * option at mount time will have to wait until we can parse
-	 * the mount option string.  For now we rely on the nbmand
-	 * value stored with the object set.  Additional mount option
-	 * string to be handled:
-	 *
-	 *   case: sensitive|insensitive|mixed
-	 *   zerocopy: on|off
-	 */
-
-	dmu_objset_name(os, osname);
-	if ((error = dsl_prop_get_integer(osname, "nbmand", &nbmand, NULL)))
-		return (error);
+	if (zfs_is_readonly(zsb) || !spa_writeable(dmu_objset_spa(os)))
+		readonly_changed_cb(zsb, B_TRUE);
 
 	/*
 	 * Register property callbacks.
@@ -347,20 +266,11 @@ zfs_register_callbacks(zfs_sb_t *zsb)
 	    "aclinherit", acl_inherit_changed_cb, zsb);
 	error = error ? error : dsl_prop_register(ds,
 	    "vscan", vscan_changed_cb, zsb);
+	error = error ? error : dsl_prop_register(ds,
+	    "nbmand", nbmand_changed_cb, zsb);
 	if (error)
 		goto unregister;
 
-	/*
-	 * Invoke our callbacks to set required flags.
-	 */
-	readonly_changed_cb(zsb, readonly);
-	setuid_changed_cb(zsb, setuid);
-	exec_changed_cb(zsb, exec);
-	devices_changed_cb(zsb, devices);
-	xattr_changed_cb(zsb, xattr);
-	atime_changed_cb(zsb, atime);
-	nbmand_changed_cb(zsb, nbmand);
-
 	return (0);
 
 unregister:
@@ -380,6 +290,7 @@ unregister:
 	(void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
 	    zsb);
 	(void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zsb);
+	(void) dsl_prop_unregister(ds, "nbmand", nbmand_changed_cb, zsb);
 
 	return (error);
 }
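Every dsl_prop_register() above must be paired with a dsl_prop_unregister() on both the error path and in zfs_unregister_callbacks(). A hedged sketch of how a hypothetical new property would slot into the chain (the "example" property, callback, and z_example field below do not exist; they only illustrate the pattern):

	static void
	example_changed_cb(void *arg, uint64_t newval)
	{
		((zfs_sb_t *)arg)->z_example = newval;	/* hypothetical field */
	}

	/* in zfs_register_callbacks(), extending the chain above */
	error = error ? error : dsl_prop_register(ds,
	    "example", example_changed_cb, zsb);

	/* and in both unwind paths */
	(void) dsl_prop_unregister(ds, "example", example_changed_cb, zsb);

The "error = error ? error : ..." idiom keeps the first failure while letting the registrations read as a flat list.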
@@ -389,7 +300,6 @@
 static int
 zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
     uint64_t *userp, uint64_t *groupp)
 {
-	znode_phys_t *znp = data;
 	int error = 0;
 
 	/*
@@ -408,20 +318,18 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
 		return (EEXIST);
 
 	if (bonustype == DMU_OT_ZNODE) {
+		znode_phys_t *znp = data;
 		*userp = znp->zp_uid;
 		*groupp = znp->zp_gid;
 	} else {
 		int hdrsize;
+		sa_hdr_phys_t *sap = data;
+		sa_hdr_phys_t sa = *sap;
+		boolean_t swap = B_FALSE;
 
 		ASSERT(bonustype == DMU_OT_SA);
-		hdrsize = sa_hdrsize(data);
 
-		if (hdrsize != 0) {
-			*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
-			    SA_UID_OFFSET));
-			*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
-			    SA_GID_OFFSET));
-		} else {
+		if (sa.sa_magic == 0) {
 			/*
 			 * This should only happen for newly created
 			 * files that haven't had the znode data filled
@@ -429,6 +337,25 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
 			 */
 			*userp = 0;
 			*groupp = 0;
+			return (0);
+		}
+		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
+			sa.sa_magic = SA_MAGIC;
+			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
+			swap = B_TRUE;
+		} else {
+			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
+		}
+
+		hdrsize = sa_hdrsize(&sa);
+		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
+		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
+		    SA_UID_OFFSET));
+		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
+		    SA_GID_OFFSET));
+		if (swap) {
+			*userp = BSWAP_64(*userp);
+			*groupp = BSWAP_64(*groupp);
+		}
 	}
 	return (error);
 }
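The new SA branch above detects a byteswapped header by probing sa_magic before trusting sa_hdrsize(). A stand-alone, user-space sketch of that detection idiom (SA_MAGIC is the value from sys/sa_impl.h; the header layout here is a simplified stand-in for sa_hdr_phys_t):

	#include <stdint.h>
	#include <stdio.h>

	#define	SA_MAGIC	0x2F505A
	#define	BSWAP_32(x)	((uint32_t)__builtin_bswap32(x))
	#define	BSWAP_64(x)	((uint64_t)__builtin_bswap64(x))

	int
	main(void)
	{
		/* as read from a pool written on an opposite-endian host */
		uint32_t magic = BSWAP_32(SA_MAGIC);
		uint64_t uid = BSWAP_64(1000ULL);
		int swap = 0;

		if (magic == BSWAP_32(SA_MAGIC)) {
			magic = SA_MAGIC;	/* normalize the header copy */
			swap = 1;		/* remember to swap the payload */
		}

		if (swap)
			uid = BSWAP_64(uid);

		printf("magic=%x uid=%llu\n", magic, (unsigned long long)uid);
		return (0);
	}

Note the hunk works on a local copy (sa_hdr_phys_t sa = *sap), so the on-disk buffer is never modified while deciding whether to swap.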
@@ -673,7 +600,7 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	int i, error;
 	uint64_t sa_obj;
 
-	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
+	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);
 
 	/*
 	 * We claim to always be readonly so we can open snapshots;
@@ -690,7 +617,7 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	 * Should probably make this a kmem cache, shuffle fields,
 	 * and just bzero up to z_hold_mtx[].
 	 */
-	zsb->z_vfs = NULL;
+	zsb->z_sb = NULL;
 	zsb->z_parent = zsb;
 	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
 	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
@@ -736,7 +663,11 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
 		if (error)
-			return (error);
+			goto out;
+
+		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
+		if ((error == 0) && (zval == ZFS_XATTR_SA))
+			zsb->z_xattr_sa = B_TRUE;
 	} else {
 		/*
 		 * Pre SA versions file systems should never touch
@@ -796,6 +727,10 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
 
+	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
+	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
+	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);
+
 	*zsbp = zsb;
 	return (0);
 
@@ -805,8 +740,9 @@ out:
 	kmem_free(zsb, sizeof (zfs_sb_t));
 	return (error);
 }
+EXPORT_SYMBOL(zfs_sb_create);
 
-static int
+int
 zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
 {
 	int error;
@@ -836,9 +772,9 @@ zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
 		 * During replay we remove the read only flag to
 		 * allow replays to succeed.
 		 */
-		readonly = zsb->z_vfs->mnt_flags & MNT_READONLY;
+		readonly = zfs_is_readonly(zsb);
 		if (readonly != 0)
-			zsb->z_vfs->mnt_flags &= ~MNT_READONLY;
+			readonly_changed_cb(zsb, B_FALSE);
 		else
 			zfs_unlinked_drain(zsb);
 
@@ -879,11 +815,15 @@ zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
 				zsb->z_replay = B_FALSE;
 			}
 		}
-		zsb->z_vfs->mnt_flags |= readonly; /* restore readonly bit */
+
+		/* restore readonly bit */
+		if (readonly != 0)
+			readonly_changed_cb(zsb, B_TRUE);
 	}
 
 	return (0);
 }
+EXPORT_SYMBOL(zfs_sb_setup);
 
 void
 zfs_sb_free(zfs_sb_t *zsb)
@@ -900,8 +840,11 @@ zfs_sb_free(zfs_sb_t *zsb)
 	rw_destroy(&zsb->z_fuid_lock);
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_destroy(&zsb->z_hold_mtx[i]);
+	mutex_destroy(&zsb->z_ctldir_lock);
+	avl_destroy(&zsb->z_ctldir_snaps);
 	kmem_free(zsb, sizeof (zfs_sb_t));
 }
+EXPORT_SYMBOL(zfs_sb_free);
 
 static void
 zfs_set_fuid_feature(zfs_sb_t *zsb)
@@ -950,6 +893,9 @@ zfs_unregister_callbacks(zfs_sb_t *zsb)
 
 		VERIFY(dsl_prop_unregister(ds, "vscan",
 		    vscan_changed_cb, zsb) == 0);
+
+		VERIFY(dsl_prop_unregister(ds, "nbmand",
+		    nbmand_changed_cb, zsb) == 0);
 	}
 }
 EXPORT_SYMBOL(zfs_unregister_callbacks);
@@ -982,6 +928,7 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
 	}
 	return (EACCES);
 }
+EXPORT_SYMBOL(zfs_check_global_label);
 #endif /* HAVE_MLSLABEL */
 
 int
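The z_ctldir_snaps tree created above keeps per-filesystem state for snapshots mounted under .zfs. It assumes a name-ordered comparator; a sketch modeled on snapentry_compare() from zfs_ctldir.c (field names may differ slightly at this commit):

	static int
	snapentry_compare(const void *a, const void *b)
	{
		const zfs_snapentry_t *sa = a;
		const zfs_snapentry_t *sb = b;
		int ret = strcmp(sa->se_name, sb->se_name);

		/* AVL comparators must collapse to exactly -1, 0, or 1. */
		if (ret < 0)
			return (-1);
		else if (ret > 0)
			return (1);
		else
			return (0);
	}

The matching mutex_destroy()/avl_destroy() calls land in zfs_sb_free() above, keeping setup and teardown symmetric.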
@@ -989,6 +936,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 {
 	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
 	uint64_t refdbytes, availbytes, usedobjs, availobjs;
+	uint64_t fsid;
 	uint32_t bshift;
 
 	ZFS_ENTER(zsb);
@@ -996,12 +944,17 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 	dmu_objset_space(zsb->z_os, &refdbytes, &availbytes,
 	    &usedobjs, &availobjs);
 
+	fsid = dmu_objset_fsid_guid(zsb->z_os);
 	/*
-	 * The underlying storage pool actually uses multiple block sizes.
-	 * We report the fragsize as the smallest block size we support,
-	 * and we report our blocksize as the filesystem's maximum blocksize.
+	 * The underlying storage pool actually uses multiple block
+	 * sizes.  Under Solaris frsize (fragment size) is reported as
+	 * the smallest block size we support, and bsize (block size)
+	 * as the filesystem's maximum block size.  Unfortunately,
+	 * under Linux the fragment size and block size are often used
+	 * interchangeably.  Thus we are forced to report both of them
+	 * as the filesystem's maximum block size.
 	 */
-	statp->f_frsize = 1UL << SPA_MINBLOCKSHIFT;
+	statp->f_frsize = zsb->z_max_blksz;
 	statp->f_bsize = zsb->z_max_blksz;
 	bshift = fls(statp->f_bsize) - 1;
@@ -1023,10 +976,10 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 	 * For f_ffree, report the smaller of the number of objects available
 	 * and the number of blocks (each object will take at least a block).
 	 */
-	statp->f_ffree = MIN(availobjs, statp->f_bfree);
+	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
 	statp->f_files = statp->f_ffree + usedobjs;
-	statp->f_fsid.val[0] = 0; /* XXX: Map up some unique ID */
-	statp->f_fsid.val[1] = 0;
+	statp->f_fsid.val[0] = (uint32_t)fsid;
+	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
 	statp->f_type = ZFS_SUPER_MAGIC;
 	statp->f_namelen = ZFS_MAXNAMELEN;
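A user-space sketch of the statvfs arithmetic above, with made-up pool numbers (DNODE_SHIFT is 9 in-tree, i.e. 512-byte dnodes, so the object count is capped by free space):

	#include <stdint.h>
	#include <stdio.h>

	#define	DNODE_SHIFT	9
	#define	MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		uint64_t availbytes = 1ULL << 40;	/* 1 TiB free */
		uint64_t availobjs = 1ULL << 48;	/* effectively unlimited */
		uint32_t bshift = 17;			/* 128 KiB max blocksize */
		uint64_t f_bfree = availbytes >> bshift;
		uint64_t f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);

		printf("f_bfree=%llu f_ffree=%llu\n",
		    (unsigned long long)f_bfree, (unsigned long long)f_ffree);
		return (0);
	}

The fsid split follows the same pattern: the low 32 bits of the objset GUID land in val[0] and the high 32 bits in val[1], giving a stable per-dataset f_fsid instead of the old zeroed placeholder.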
@@ -1058,14 +1011,34 @@
 }
 EXPORT_SYMBOL(zfs_root);
 
+#ifdef HAVE_SHRINK
+int
+zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
+	struct shrinker *shrinker = &sb->s_shrink;
+	struct shrink_control sc = {
+		.nr_to_scan = nr_to_scan,
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	ZFS_ENTER(zsb);
+	*objects = (*shrinker->shrink)(shrinker, &sc);
+	ZFS_EXIT(zsb);
+
+	return (0);
+}
+EXPORT_SYMBOL(zfs_sb_prune);
+#endif /* HAVE_SHRINK */
+
 /*
- * Teardown the zfs_sb_t::z_os.
+ * Teardown the zfs_sb_t.
  *
  * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
  * and 'z_teardown_inactive_lock' held.
  */
 int
-zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
+zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 {
 	znode_t *zp;
@@ -1080,10 +1053,17 @@ zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 		 * for non-snapshots.
 		 */
 		shrink_dcache_sb(zsb->z_parent->z_sb);
-		invalidate_inodes(zsb->z_parent->z_sb);
 	}
 
 	/*
+	 * If someone has not already unmounted this file system,
+	 * drain the iput_taskq to ensure all active references to the
+	 * zfs_sb_t have been handled; only then can it be safely destroyed.
+	 */
+	if (zsb->z_os)
+		taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));
+
+	/*
 	 * Close the zil.  NB: Can't close the zil while zfs_inactive
 	 * threads are blocked as zil_close can call zfs_inactive.
 	 */
@@ -1106,25 +1086,26 @@ zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 	}
 
 	/*
-	 * At this point there are no vops active, and any new vops will
-	 * fail with EIO since we have z_teardown_lock for writer (only
-	 * relavent for forced unmount).
+	 * At this point there are no VFS ops active, and any new VFS ops
+	 * will fail with EIO since we have z_teardown_lock for writer (only
+	 * relevant for forced unmount).
 	 *
 	 * Release all holds on dbufs.
 	 */
 	mutex_enter(&zsb->z_znodes_lock);
 	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
-	    zp = list_next(&zsb->z_all_znodes, zp))
+	    zp = list_next(&zsb->z_all_znodes, zp)) {
 		if (zp->z_sa_hdl) {
 			ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
 			zfs_znode_dmu_fini(zp);
 		}
+	}
 	mutex_exit(&zsb->z_znodes_lock);
 
 	/*
-	 * If we are unmounting, set the unmounted flag and let new vops
+	 * If we are unmounting, set the unmounted flag and let new VFS ops
 	 * unblock.  zfs_inactive will have the unmounted behavior, and all
-	 * other vops will fail with EIO.
+	 * other VFS ops will fail with EIO.
 	 */
 	if (unmounting) {
 		zsb->z_unmounted = B_TRUE;
@@ -1149,13 +1130,18 @@ zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 	/*
 	 * Evict cached data
 	 */
-	if (dmu_objset_is_dirty_anywhere(zsb->z_os))
-		if (!(zsb->z_vfs->mnt_flags & MNT_READONLY))
-			txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
+	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
+	    !zfs_is_readonly(zsb))
+		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
 	(void) dmu_objset_evict_dbufs(zsb->z_os);
 
 	return (0);
 }
+EXPORT_SYMBOL(zfs_sb_teardown);
+
+#if defined(HAVE_BDI) && !defined(HAVE_BDI_SETUP_AND_REGISTER)
+atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
+#endif /* HAVE_BDI && !HAVE_BDI_SETUP_AND_REGISTER */
 
 int
 zfs_domount(struct super_block *sb, void *data, int silent)
@@ -1167,17 +1153,6 @@ zfs_domount(struct super_block *sb, void *data, int silent)
 	uint64_t recordsize;
 	int error;
 
-	/*
-	 * Linux allows multiple vfs mounts per super block.  However, the
-	 * zfs_sb_t only contains a pointer for a single vfs mount.  This
-	 * back reference in the long term could be extended to a list of
-	 * vfs mounts if a hook were added to the kernel to notify us when
-	 * a vfsmount is destroyed.  Until then we must limit the number
-	 * of mounts per super block to one.
-	 */
-	if (atomic_read(&sb->s_active) > 1)
-		return (EBUSY);
-
 	error = zfs_sb_create(osname, &zsb);
 	if (error)
 		return (error);
@@ -1187,7 +1162,6 @@ zfs_domount(struct super_block *sb, void *data, int silent)
 		goto out;
 
 	zsb->z_sb = sb;
-	zsb->z_vfs = zmd->z_vfs;
 	sb->s_fs_info = zsb;
 	sb->s_magic = ZFS_SUPER_MAGIC;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -1195,12 +1169,30 @@ zfs_domount(struct super_block *sb, void *data, int silent)
 	sb->s_blocksize = recordsize;
 	sb->s_blocksize_bits = ilog2(recordsize);
 
+#ifdef HAVE_BDI
+	/*
+	 * 2.6.32 API change,
+	 * Added backing_dev_info (BDI) per super block interfaces.  A BDI
+	 * must be configured when using a non-device backed filesystem for
+	 * proper writeback.  This is not required for older pdflush kernels.
+	 *
+	 * NOTE: Linux read-ahead is disabled in favor of zfs read-ahead.
+	 */
+	zsb->z_bdi.ra_pages = 0;
+	sb->s_bdi = &zsb->z_bdi;
+
+	error = -bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
+	if (error)
+		goto out;
+#endif /* HAVE_BDI */
+
 	/* Set callback operations for the file system. */
 	sb->s_op = &zpl_super_operations;
 	sb->s_xattr = zpl_xattr_handlers;
-#ifdef HAVE_EXPORTS
-	sb->s_export_op = &zpl_export_operations;
-#endif /* HAVE_EXPORTS */
+	sb->s_export_op = &zpl_export_operations;
+#ifdef HAVE_S_D_OP
+	sb->s_d_op = &zpl_dentry_operations;
+#endif /* HAVE_S_D_OP */
 
 	/* Set features for file system. */
 	zfs_set_fuid_feature(zsb);
@@ -1221,9 +1213,6 @@ zfs_domount(struct super_block *sb, void *data, int silent)
 		mutex_exit(&zsb->z_os->os_user_ptr_lock);
 	} else {
 		error = zfs_sb_setup(zsb, B_TRUE);
-#ifdef HAVE_SNAPSHOT
-		(void) zfs_snap_create(zsb);
-#endif /* HAVE_SNAPSHOT */
 	}
 
 	/* Allocate a root inode for the filesystem. */
@@ -1234,12 +1223,15 @@ zfs_domount(struct super_block *sb, void *data, int silent)
 	/* Allocate a root dentry for the filesystem */
-	sb->s_root = d_alloc_root(root_inode);
+	sb->s_root = d_make_root(root_inode);
 	if (sb->s_root == NULL) {
 		(void) zfs_umount(sb);
 		error = ENOMEM;
 		goto out;
 	}
+
+	if (!zsb->z_issnap)
+		zfsctl_create(zsb);
 out:
 	if (error) {
 		dmu_objset_disown(zsb->z_os, zsb);
@@ -1250,6 +1242,27 @@ out:
 		zsb->z_os = NULL;
 	}
 
 	return (error);
 }
 EXPORT_SYMBOL(zfs_domount);
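The d_alloc_root() to d_make_root() switch above tracks a kernel API change: d_make_root() consumes the inode reference even on failure. A sketch of the compatibility shim commonly carried for pre-3.4 kernels (the HAVE_D_MAKE_ROOT guard is an assumption in the style of this tree's other HAVE_* configure checks):

	#ifndef HAVE_D_MAKE_ROOT
	static inline struct dentry *
	d_make_root(struct inode *inode)
	{
		struct dentry *de = d_alloc_root(inode);

		/* match d_make_root(): drop the inode if no dentry */
		if (de == NULL)
			iput(inode);

		return (de);
	}
	#endif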
+/*
+ * Called when an unmount is requested and certain sanity checks have
+ * already passed.  At this point no dentries or inodes have been reclaimed
+ * from their respective caches.  We drop the extra reference on the .zfs
+ * control directory to allow everything to be reclaimed.  All snapshots
+ * must already have been unmounted to reach this point.
+ */
+void
+zfs_preumount(struct super_block *sb)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
+
+	if (zsb != NULL && zsb->z_ctldir != NULL)
+		zfsctl_destroy(zsb);
+}
+EXPORT_SYMBOL(zfs_preumount);
+
+/*
+ * Called once all other unmount-related teardown has occurred.
+ * It is our responsibility to release any remaining infrastructure.
+ */
 /*ARGSUSED*/
 int
 zfs_umount(struct super_block *sb)
@@ -1257,9 +1270,13 @@ zfs_umount(struct super_block *sb)
 	zfs_sb_t *zsb = sb->s_fs_info;
 	objset_t *os;
 
-	VERIFY(zfsvfs_teardown(zsb, B_TRUE) == 0);
+	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
 	os = zsb->z_os;
 
+#ifdef HAVE_BDI
+	bdi_destroy(sb->s_bdi);
+#endif /* HAVE_BDI */
+
 	/*
 	 * z_os will be NULL if there was an error in
 	 * attempting to reopen zsb.
@@ -1284,9 +1301,20 @@ zfs_umount(struct super_block *sb)
 EXPORT_SYMBOL(zfs_umount);
 
 int
-zfs_vget(struct vfsmount *vfsp, struct inode **ipp, fid_t *fidp)
+zfs_remount(struct super_block *sb, int *flags, char *data)
 {
-	zfs_sb_t *zsb = VTOZSB(vfsp);
+	/*
+	 * All namespace flags (MNT_*) and super block flags (MS_*) will
+	 * be handled by the Linux VFS.  Only handle custom options here.
+	 */
+	return (0);
+}
+EXPORT_SYMBOL(zfs_remount);
+
+int
+zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
 	znode_t *zp;
 	uint64_t object = 0;
 	uint64_t fid_gen = 0;
@@ -1311,11 +1339,10 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 
 		ZFS_EXIT(zsb);
 
-#ifdef HAVE_SNAPSHOT
-		err = zfsctl_lookup_objset(vfsp, objsetid, &zsb);
+		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
 		if (err)
 			return (EINVAL);
-#endif /* HAVE_SNAPSHOT */
+
 		ZFS_ENTER(zsb);
 	}
 
@@ -1332,22 +1359,20 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 		return (EINVAL);
 	}
 
-#ifdef HAVE_SNAPSHOT
 	/* A zero fid_gen means we are in the .zfs control directories */
 	if (fid_gen == 0 &&
 	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
 		*ipp = zsb->z_ctldir;
 		ASSERT(*ipp != NULL);
 		if (object == ZFSCTL_INO_SNAPDIR) {
-			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, NULL,
-			    0, NULL, NULL, NULL, NULL, NULL) == 0);
+			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
+			    0, kcred, NULL, NULL) == 0);
 		} else {
 			igrab(*ipp);
 		}
 		ZFS_EXIT(zsb);
 		return (0);
 	}
-#endif /* HAVE_SNAPSHOT */
 
 	gen_mask = -1ULL >> (64 - 8 * i);
 
@@ -1378,7 +1403,7 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 EXPORT_SYMBOL(zfs_vget);
 
 /*
- * Block out VOPs and close zfs_sb_t::z_os
+ * Block out VFS ops and close zfs_sb_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held.
 */
@@ -1388,8 +1413,9 @@ zfs_suspend_fs(zfs_sb_t *zsb)
 {
 	int error;
 
-	if ((error = zfsvfs_teardown(zsb, B_FALSE)) != 0)
+	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
 		return (error);
+
 	dmu_objset_disown(zsb->z_os, zsb);
 
 	return (0);
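zfs_suspend_fs() and zfs_resume_fs() are used as a bracket around operations that replace the objset under a live mount, such as rollback and zfs recv. A hedged sketch of the calling pattern (get_zfs_sb() and the commented-out calls stand in for the zfs_ioctl.c code, which differs in detail):

	zfs_sb_t *zsb;
	int error, resume_err;

	error = get_zfs_sb(zc->zc_name, &zsb);
	if (error == 0) {
		error = zfs_suspend_fs(zsb);
		if (error == 0) {
			/* ... dsl_dataset_rollback() / dmu_recv_end() ... */
			resume_err = zfs_resume_fs(zsb, zc->zc_name);
			error = error ? error : resume_err;
		}
		deactivate_super(zsb->z_sb);
	}

Holding z_teardown_lock for writer across the bracket is what lets zfs_resume_fs() re-establish or invalidate every cached znode before new VFS ops can proceed.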
@@ -1397,7 +1423,7 @@ zfs_suspend_fs(zfs_sb_t *zsb)
 EXPORT_SYMBOL(zfs_suspend_fs);
 
 /*
- * Reopen zfs_sb_t::z_os and release VOPs.
+ * Reopen zfs_sb_t and release VFS ops.
 */
 int
 zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
@@ -1426,33 +1452,41 @@ zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
 			goto bail;
 
 		VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
+		zsb->z_rollback_time = jiffies;
 
 		/*
-		 * Attempt to re-establish all the active znodes with
-		 * their dbufs.  If a zfs_rezget() fails, then we'll let
-		 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
-		 * when they try to use their znode.
+		 * Attempt to re-establish all the active inodes with their
+		 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
+		 * and mark it stale.  This prevents a collision if a new
+		 * inode/object is created which must use the same inode
+		 * number.  The stale inode will be released when the
+		 * VFS prunes the dentry holding the remaining references
+		 * on the stale inode.
 		 */
 		mutex_enter(&zsb->z_znodes_lock);
 		for (zp = list_head(&zsb->z_all_znodes); zp;
 		    zp = list_next(&zsb->z_all_znodes, zp)) {
-			(void) zfs_rezget(zp);
+			err2 = zfs_rezget(zp);
+			if (err2) {
+				remove_inode_hash(ZTOI(zp));
+				zp->z_is_stale = B_TRUE;
+			}
 		}
 		mutex_exit(&zsb->z_znodes_lock);
-	}
 
 bail:
-	/* release the VOPs */
+	/* release the VFS ops */
 	rw_exit(&zsb->z_teardown_inactive_lock);
 	rrw_exit(&zsb->z_teardown_lock, FTAG);
 
 	if (err) {
 		/*
-		 * Since we couldn't reopen zfs_sb_t::z_os, force
-		 * unmount this file system.
+		 * Since we couldn't reopen zfs_sb_t, or setup the
+		 * SA framework, force unmount this file system.
 		 */
-		(void) zfs_umount(zsb->z_sb);
+		if (zsb->z_os)
+			(void) zfs_umount(zsb->z_sb);
 	}
 	return (err);
 }
@@ -1506,7 +1540,7 @@ zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
 		error = zap_add(os, MASTER_NODE_OBJ,
 		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
-		ASSERT3U(error, ==, 0);
+		ASSERT0(error);
 
 		VERIFY(0 == sa_set_sa_object(os, sa_obj));
 		sa_register_update_callback(os, zfs_sa_upgrade);
@@ -1568,13 +1602,16 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
 	}
 	return (error);
 }
+EXPORT_SYMBOL(zfs_get_zplprop);
 
 void
 zfs_init(void)
 {
+	zfsctl_init();
 	zfs_znode_init();
 	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 	register_filesystem(&zpl_fs_type);
+	(void) arc_add_prune_callback(zpl_prune_sbs, NULL);
 }
 
 void
@@ -1582,4 +1619,5 @@ zfs_fini(void)
 {
 	unregister_filesystem(&zpl_fs_type);
 	zfs_znode_fini();
+	zfsctl_fini();
 }
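zfs_init() above registers zpl_prune_sbs() with the ARC so that memory pressure can shed cached dentries and inodes, and zfs_fini() unwinds in the reverse order of bring-up. A sketch of the shape of that prune callback, modeled on zpl_super.c (the per-sb helper and the sizing heuristic are assumptions for this commit):

	void
	zpl_prune_sbs(int64_t bytes_to_scan, void *private)
	{
		unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t));

		/* ask every mounted ZFS super block to shrink its caches */
		iterate_supers_type(&zpl_fs_type, zpl_prune_sb, &nr_to_scan);
		kmem_reap();
	}

On kernels with per-sb shrinkers this ultimately lands in zfs_sb_prune() above, which simply invokes the registered shrinker under ZFS_ENTER protection.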