diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index e0987e9..e618f2b 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -56,13 +57,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include "zfs_comutil.h"
@@ -140,10 +141,16 @@ xattr_changed_cb(void *arg, uint64_t newval)
 {
 	zfs_sb_t *zsb = arg;
 
-	if (newval == TRUE)
-		zsb->z_flags |= ZSB_XATTR;
-	else
+	if (newval == ZFS_XATTR_OFF) {
 		zsb->z_flags &= ~ZSB_XATTR;
+	} else {
+		zsb->z_flags |= ZSB_XATTR;
+
+		if (newval == ZFS_XATTR_SA)
+			zsb->z_xattr_sa = B_TRUE;
+		else
+			zsb->z_xattr_sa = B_FALSE;
+	}
 }
 
 static void
@@ -293,7 +300,6 @@ static int
 zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
     uint64_t *userp, uint64_t *groupp)
 {
-	znode_phys_t *znp = data;
 	int error = 0;
 
 	/*
@@ -312,20 +318,18 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
 		return (EEXIST);
 
 	if (bonustype == DMU_OT_ZNODE) {
+		znode_phys_t *znp = data;
 		*userp = znp->zp_uid;
 		*groupp = znp->zp_gid;
 	} else {
 		int hdrsize;
+		sa_hdr_phys_t *sap = data;
+		sa_hdr_phys_t sa = *sap;
+		boolean_t swap = B_FALSE;
 
 		ASSERT(bonustype == DMU_OT_SA);
-		hdrsize = sa_hdrsize(data);
 
-		if (hdrsize != 0) {
-			*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
-			    SA_UID_OFFSET));
-			*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
-			    SA_GID_OFFSET));
-		} else {
+		if (sa.sa_magic == 0) {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
@@ -333,6 +337,25 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
 			 */
 			*userp = 0;
 			*groupp = 0;
+			return (0);
+		}
+		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
+			sa.sa_magic = SA_MAGIC;
+			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
+			swap = B_TRUE;
+		} else {
+			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
+		}
+
+		hdrsize = sa_hdrsize(&sa);
+		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
+		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
+		    SA_UID_OFFSET));
+		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
+		    SA_GID_OFFSET));
+		if (swap) {
+			*userp = BSWAP_64(*userp);
+			*groupp = BSWAP_64(*groupp);
 		}
 	}
 	return (error);
@@ -577,7 +600,7 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	int i, error;
 	uint64_t sa_obj;
 
-	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);
+	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);
 
 	/*
 	 * We claim to always be readonly so we can open snapshots;
@@ -640,7 +663,11 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
 		    &sa_obj);
 		if (error)
-			return (error);
+			goto out;
+
+		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
+		if ((error == 0) && (zval == ZFS_XATTR_SA))
+			zsb->z_xattr_sa = B_TRUE;
 	} else {
 		/*
 		 * Pre-SA version file systems should never touch
@@ -700,6 +727,10 @@ zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
 
+	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
+	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
+	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);
+
 	*zsbp = zsb;
 
 	return (0);
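[Note: the zfs_space_delta_cb() hunks above are the subtle part of this change. An SA bonus buffer may have been written by a host of the opposite endianness, so the callback now copies the header to a local, normalizes the copy when the magic is byteswapped (the shared buffer itself is never modified), and only then trusts sa_hdrsize() and swaps the UID/GID it reads out. Below is a stand-alone sketch of the same detect-and-swap technique; ex_sa_hdr_t and EX_SA_MAGIC are simplified stand-ins for the kernel's sa_hdr_phys_t and SA_MAGIC, not the real definitions.]

    /*
     * Normalize a copy of an on-disk header to native byte order.
     * Builds with gcc/clang: cc -o sa_swap sa_swap.c
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EX_SA_MAGIC 0x2F505AU           /* stand-in magic value */

    typedef struct {
            uint32_t sa_magic;
            uint16_t sa_layout_info;
    } ex_sa_hdr_t;

    /*
     * Returns 0 on success and sets *swapped when the payload also
     * needs swapping; returns -1 when the magic matches neither byte
     * order (corrupt or not yet initialized).
     */
    static int
    ex_sa_hdr_normalize(ex_sa_hdr_t *hdr, int *swapped)
    {
            *swapped = 0;

            if (hdr->sa_magic == EX_SA_MAGIC)
                    return (0);

            if (hdr->sa_magic == __builtin_bswap32(EX_SA_MAGIC)) {
                    /* Written by a host of the opposite endianness. */
                    hdr->sa_magic = EX_SA_MAGIC;
                    hdr->sa_layout_info =
                        __builtin_bswap16(hdr->sa_layout_info);
                    *swapped = 1;
                    return (0);
            }

            return (-1);
    }

    int
    main(void)
    {
            ex_sa_hdr_t hdr = { __builtin_bswap32(EX_SA_MAGIC),
                __builtin_bswap16(0x0003) };
            int swapped;

            if (ex_sa_hdr_normalize(&hdr, &swapped) == 0)
                    printf("layout_info=%u swapped=%d\n",
                        (unsigned)hdr.sa_layout_info, swapped);
            return (0);
    }

[Working on a local copy is what lets the VERIFY3U() checks in the hunk run safely against foreign-endian pools without ever dirtying the shared bonus buffer.]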
@@ -709,8 +740,9 @@ out:
 	kmem_free(zsb, sizeof (zfs_sb_t));
 	return (error);
 }
+EXPORT_SYMBOL(zfs_sb_create);
 
-static int
+int
 zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
 {
 	int error;
@@ -791,6 +823,7 @@ zfs_sb_setup(zfs_sb_t *zsb, boolean_t mounting)
 
 	return (0);
 }
+EXPORT_SYMBOL(zfs_sb_setup);
 
 void
 zfs_sb_free(zfs_sb_t *zsb)
@@ -807,8 +840,11 @@ zfs_sb_free(zfs_sb_t *zsb)
 	rw_destroy(&zsb->z_fuid_lock);
 	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
 		mutex_destroy(&zsb->z_hold_mtx[i]);
+	mutex_destroy(&zsb->z_ctldir_lock);
+	avl_destroy(&zsb->z_ctldir_snaps);
 	kmem_free(zsb, sizeof (zfs_sb_t));
 }
+EXPORT_SYMBOL(zfs_sb_free);
 
 static void
 zfs_set_fuid_feature(zfs_sb_t *zsb)
@@ -892,6 +928,7 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
 	}
 	return (EACCES);
 }
+EXPORT_SYMBOL(zfs_check_global_label);
 #endif /* HAVE_MLSLABEL */
 
 int
@@ -899,6 +936,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 {
 	zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
 	uint64_t refdbytes, availbytes, usedobjs, availobjs;
+	uint64_t fsid;
 	uint32_t bshift;
 
 	ZFS_ENTER(zsb);
@@ -906,6 +944,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 	dmu_objset_space(zsb->z_os, &refdbytes, &availbytes,
 	    &usedobjs, &availobjs);
+	fsid = dmu_objset_fsid_guid(zsb->z_os);
 
 	/*
 	 * The underlying storage pool actually uses multiple block
 	 * sizes.  Under Solaris frsize (fragment size) is reported as
@@ -937,10 +976,10 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 	 * For f_ffree, report the smaller of the number of objects available
 	 * and the number of blocks (each object will take at least a block).
 	 */
-	statp->f_ffree = MIN(availobjs, statp->f_bfree);
+	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
 	statp->f_files = statp->f_ffree + usedobjs;
-	statp->f_fsid.val[0] = dentry->d_sb->s_dev;
-	statp->f_fsid.val[1] = 0;
+	statp->f_fsid.val[0] = (uint32_t)fsid;
+	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
 	statp->f_type = ZFS_SUPER_MAGIC;
 	statp->f_namelen = ZFS_MAXNAMELEN;
@@ -972,14 +1011,34 @@ zfs_root(zfs_sb_t *zsb, struct inode **ipp)
 }
 EXPORT_SYMBOL(zfs_root);
 
+#ifdef HAVE_SHRINK
+int
+zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
+	struct shrinker *shrinker = &sb->s_shrink;
+	struct shrink_control sc = {
+		.nr_to_scan = nr_to_scan,
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	ZFS_ENTER(zsb);
+	*objects = (*shrinker->shrink)(shrinker, &sc);
+	ZFS_EXIT(zsb);
+
+	return (0);
+}
+EXPORT_SYMBOL(zfs_sb_prune);
+#endif /* HAVE_SHRINK */
+
 /*
- * Teardown the zfs_sb_t::z_os.
+ * Teardown the zfs_sb_t.
  *
  * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
  * and 'z_teardown_inactive_lock' held.
  */
 int
-zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
+zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 {
 	znode_t *zp;
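[Note: two quiet behavior changes hide in the zfs_statvfs() hunks above. First, f_ffree is now capped by availbytes >> DNODE_SHIFT rather than f_bfree, since every new object consumes at least one dnode-sized block. Second, f_fsid is now derived from the dataset's 64-bit GUID instead of the ephemeral device number, so it stays stable across reboots and exports. The GUID is simply split across the two 32-bit words; a minimal round-trip sketch, where ex_fsid_t is a stand-in for the kernel's fsid type:]

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
            uint32_t val[2];
    } ex_fsid_t;

    /* Pack a 64-bit GUID into two 32-bit words, as the hunk above does. */
    static ex_fsid_t
    ex_fsid_pack(uint64_t guid)
    {
            ex_fsid_t fsid;

            fsid.val[0] = (uint32_t)guid;           /* low 32 bits */
            fsid.val[1] = (uint32_t)(guid >> 32);   /* high 32 bits */
            return (fsid);
    }

    static uint64_t
    ex_fsid_unpack(ex_fsid_t fsid)
    {
            return ((uint64_t)fsid.val[1] << 32 | fsid.val[0]);
    }

    int
    main(void)
    {
            uint64_t guid = 0x123456789abcdef0ULL;

            printf("round trip ok: %d\n",
                ex_fsid_unpack(ex_fsid_pack(guid)) == guid);
            return (0);
    }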
@@ -994,7 +1053,6 @@ zfsvfs_teardown(zfs_sb_t *zsb, boolean_t unmounting)
 	 * for non-snapshots.
 	 */
 	shrink_dcache_sb(zsb->z_parent->z_sb);
-	(void) spl_invalidate_inodes(zsb->z_parent->z_sb, 0);
 }
 
 /*
@@ -1026,25 +1084,26 @@
 	}
 
 	/*
-	 * At this point there are no vops active, and any new vops will
-	 * fail with EIO since we have z_teardown_lock for writer (only
-	 * relavent for forced unmount).
+	 * At this point there are no VFS ops active, and any new VFS ops
+	 * will fail with EIO since we have z_teardown_lock for writer (only
+	 * relevant for forced unmount).
 	 *
 	 * Release all holds on dbufs.
 	 */
 	mutex_enter(&zsb->z_znodes_lock);
 	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
-	    zp = list_next(&zsb->z_all_znodes, zp))
+	    zp = list_next(&zsb->z_all_znodes, zp)) {
 		if (zp->z_sa_hdl) {
 			ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
 			zfs_znode_dmu_fini(zp);
 		}
+	}
 	mutex_exit(&zsb->z_znodes_lock);
 
 	/*
-	 * If we are unmounting, set the unmounted flag and let new vops
+	 * If we are unmounting, set the unmounted flag and let new VFS ops
 	 * unblock. zfs_inactive will have the unmounted behavior, and all
-	 * other vops will fail with EIO.
+	 * other VFS ops will fail with EIO.
 	 */
 	if (unmounting) {
 		zsb->z_unmounted = B_TRUE;
@@ -1069,13 +1128,18 @@
 	/*
 	 * Evict cached data
 	 */
-	if (dmu_objset_is_dirty_anywhere(zsb->z_os))
-		if (!zfs_is_readonly(zsb))
-			txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
+	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
+	    !zfs_is_readonly(zsb))
+		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
 	(void) dmu_objset_evict_dbufs(zsb->z_os);
 
 	return (0);
 }
+EXPORT_SYMBOL(zfs_sb_teardown);
+
+#if defined(HAVE_BDI) && !defined(HAVE_BDI_SETUP_AND_REGISTER)
+atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
+#endif /* HAVE_BDI && !HAVE_BDI_SETUP_AND_REGISTER */
 
 int
 zfs_domount(struct super_block *sb, void *data, int silent)
@@ -1103,10 +1167,30 @@
 	sb->s_blocksize = recordsize;
 	sb->s_blocksize_bits = ilog2(recordsize);
 
+#ifdef HAVE_BDI
+	/*
+	 * 2.6.32 API change,
+	 * Added backing_dev_info (BDI) per super block interfaces.  A BDI
+	 * must be configured when using a non-device backed filesystem for
+	 * proper writeback.  This is not required for older pdflush kernels.
+	 *
+	 * NOTE: Linux read-ahead is disabled in favor of ZFS read-ahead.
+	 */
+	zsb->z_bdi.ra_pages = 0;
+	sb->s_bdi = &zsb->z_bdi;
+
+	error = -bdi_setup_and_register(&zsb->z_bdi, "zfs", BDI_CAP_MAP_COPY);
+	if (error)
+		goto out;
+#endif /* HAVE_BDI */
+
 	/* Set callback operations for the file system. */
 	sb->s_op = &zpl_super_operations;
 	sb->s_xattr = zpl_xattr_handlers;
 	sb->s_export_op = &zpl_export_operations;
+#ifdef HAVE_S_D_OP
+	sb->s_d_op = &zpl_dentry_operations;
+#endif /* HAVE_S_D_OP */
 
 	/* Set features for file system. */
 	zfs_set_fuid_feature(zsb);
@@ -1127,9 +1211,6 @@
 		mutex_exit(&zsb->z_os->os_user_ptr_lock);
 	} else {
 		error = zfs_sb_setup(zsb, B_TRUE);
-#ifdef HAVE_SNAPSHOT
-		(void) zfs_snap_create(zsb);
-#endif /* HAVE_SNAPSHOT */
 	}
 
 	/* Allocate a root inode for the filesystem. */
@@ -1140,12 +1221,15 @@
 	}
 
 	/* Allocate a root dentry for the filesystem */
-	sb->s_root = d_alloc_root(root_inode);
+	sb->s_root = d_make_root(root_inode);
 	if (sb->s_root == NULL) {
 		(void) zfs_umount(sb);
 		error = ENOMEM;
 		goto out;
 	}
+
+	if (!zsb->z_issnap)
+		zfsctl_create(zsb);
 out:
 	if (error) {
 		dmu_objset_disown(zsb->z_os, zsb);
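[Note: observe the negation in error = -bdi_setup_and_register(...) above. Linux kernel helpers return 0 or a negative errno, while this Solaris-derived code passes errors around as positive values; negating exactly once at the boundary keeps both conventions intact. A toy illustration of that bridge (both function names below are hypothetical):]

    #include <errno.h>
    #include <stdio.h>

    /* Linux-style helper: returns 0 on success or a negative errno. */
    static int
    linux_style_register(int simulate_failure)
    {
            return (simulate_failure ? -ENOMEM : 0);
    }

    /* Solaris-style caller: stores and returns errors as positive values. */
    static int
    solaris_style_setup(int simulate_failure)
    {
            int error;

            /* Negate once at the boundary, as the hunk above does. */
            error = -linux_style_register(simulate_failure);
            if (error)
                    return (error);  /* already positive, e.g. ENOMEM */

            return (0);
    }

    int
    main(void)
    {
            printf("ok=%d fail=%d (ENOMEM=%d)\n",
                solaris_style_setup(0), solaris_style_setup(1), ENOMEM);
            return (0);
    }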
@@ -1156,6 +1240,27 @@ out:
 }
 EXPORT_SYMBOL(zfs_domount);
 
+/*
+ * Called when an unmount is requested and certain sanity checks have
+ * already passed.  At this point no dentries or inodes have been reclaimed
+ * from their respective caches.  We drop the extra reference on the .zfs
+ * control directory to allow everything to be reclaimed.  All snapshots
+ * must already have been unmounted to reach this point.
+ */
+void
+zfs_preumount(struct super_block *sb)
+{
+	zfs_sb_t *zsb = sb->s_fs_info;
+
+	if (zsb != NULL && zsb->z_ctldir != NULL)
+		zfsctl_destroy(zsb);
+}
+EXPORT_SYMBOL(zfs_preumount);
+
+/*
+ * Called once all other unmount-related teardown has occurred.
+ * It is our responsibility to release any remaining infrastructure.
+ */
 /*ARGSUSED*/
 int
 zfs_umount(struct super_block *sb)
@@ -1163,9 +1268,13 @@
 	zfs_sb_t *zsb = sb->s_fs_info;
 	objset_t *os;
 
-	VERIFY(zfsvfs_teardown(zsb, B_TRUE) == 0);
+	VERIFY(zfs_sb_teardown(zsb, B_TRUE) == 0);
 	os = zsb->z_os;
 
+#ifdef HAVE_BDI
+	bdi_destroy(sb->s_bdi);
+#endif /* HAVE_BDI */
+
 	/*
 	 * z_os will be NULL if there was an error in
 	 * attempting to reopen zsb.
@@ -1228,11 +1337,10 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 
 		ZFS_EXIT(zsb);
 
-#ifdef HAVE_SNAPSHOT
-		err = zfsctl_lookup_objset(vfsp, objsetid, &zsb);
+		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
 		if (err)
 			return (EINVAL);
-#endif /* HAVE_SNAPSHOT */
+
 		ZFS_ENTER(zsb);
 	}
@@ -1249,22 +1357,20 @@
 		return (EINVAL);
 	}
 
-#ifdef HAVE_SNAPSHOT
 	/* A zero fid_gen means we are in the .zfs control directories */
 	if (fid_gen == 0 &&
 	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
 		*ipp = zsb->z_ctldir;
 		ASSERT(*ipp != NULL);
 		if (object == ZFSCTL_INO_SNAPDIR) {
-			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, NULL,
-			    0, NULL, NULL, NULL, NULL, NULL) == 0);
+			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
+			    0, kcred, NULL, NULL) == 0);
 		} else {
 			igrab(*ipp);
 		}
 		ZFS_EXIT(zsb);
 		return (0);
 	}
-#endif /* HAVE_SNAPSHOT */
 
 	gen_mask = -1ULL >> (64 - 8 * i);
@@ -1295,7 +1401,7 @@
 EXPORT_SYMBOL(zfs_vget);
 
 /*
- * Block out VOPs and close zfs_sb_t::z_os
+ * Block out VFS ops and close zfs_sb_t
  *
  * Note, if successful, then we return with the 'z_teardown_lock' and
  * 'z_teardown_inactive_lock' write held.
@@ -1305,8 +1411,9 @@ zfs_suspend_fs(zfs_sb_t *zsb)
 {
 	int error;
 
-	if ((error = zfsvfs_teardown(zsb, B_FALSE)) != 0)
+	if ((error = zfs_sb_teardown(zsb, B_FALSE)) != 0)
 		return (error);
+
 	dmu_objset_disown(zsb->z_os, zsb);
 
 	return (0);
@@ -1314,7 +1421,7 @@
 EXPORT_SYMBOL(zfs_suspend_fs);
 
 /*
- * Reopen zfs_sb_t::z_os and release VOPs.
+ * Reopen zfs_sb_t and release VFS ops.
  */
 int
 zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
@@ -1343,30 +1450,37 @@
 		goto bail;
 
 	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
+	zsb->z_rollback_time = jiffies;
 
 	/*
-	 * Attempt to re-establish all the active znodes with
-	 * their dbufs. If a zfs_rezget() fails, then we'll let
-	 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
-	 * when they try to use their znode.
+	 * Attempt to re-establish all the active inodes with their
+	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
+	 * and mark it stale.  This prevents a collision if a new
+	 * inode/object is created which must use the same inode
+	 * number.  The stale inode will be released when the
+	 * VFS prunes the dentry holding the remaining references
+	 * on the stale inode.
 	 */
 	mutex_enter(&zsb->z_znodes_lock);
 	for (zp = list_head(&zsb->z_all_znodes); zp;
 	    zp = list_next(&zsb->z_all_znodes, zp)) {
-		(void) zfs_rezget(zp);
+		err2 = zfs_rezget(zp);
+		if (err2) {
+			remove_inode_hash(ZTOI(zp));
+			zp->z_is_stale = B_TRUE;
+		}
 	}
 	mutex_exit(&zsb->z_znodes_lock);
-
 }
 
 bail:
-	/* release the VOPs */
+	/* release the VFS ops */
 	rw_exit(&zsb->z_teardown_inactive_lock);
 	rrw_exit(&zsb->z_teardown_lock, FTAG);
 
 	if (err) {
 		/*
-		 * Since we couldn't reopen zfs_sb_t::z_os, force
+		 * Since we couldn't reopen zfs_sb_t, force
 		 * unmount this file system.
 		 */
 		(void) zfs_umount(zsb->z_sb);
@@ -1423,7 +1537,7 @@ zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
 
 		error = zap_add(os, MASTER_NODE_OBJ,
 		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
-		ASSERT3U(error, ==, 0);
+		ASSERT0(error);
 
 		VERIFY(0 == sa_set_sa_object(os, sa_obj));
 		sa_register_update_callback(os, zfs_sa_upgrade);
@@ -1485,13 +1599,16 @@ zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
 	}
 	return (error);
 }
+EXPORT_SYMBOL(zfs_get_zplprop);
 
 void
 zfs_init(void)
 {
+	zfsctl_init();
 	zfs_znode_init();
 	dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 	register_filesystem(&zpl_fs_type);
+	(void) arc_add_prune_callback(zpl_prune_sbs, NULL);
 }
 
 void
@@ -1499,4 +1616,5 @@ zfs_fini(void)
 {
 	unregister_filesystem(&zpl_fs_type);
 	zfs_znode_fini();
+	zfsctl_fini();
 }
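[Note: the ASSERT3U(error, ==, 0) to ASSERT0(error) change above is a readability shorthand, not a behavior change. Below is a user-space approximation of these VERIFY/ASSERT-style triplet macros; the EX_ prefixes mark them as stand-ins for the real definitions, which live in the SPL/ZFS debug headers:]

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define EX_VERIFY3U(lhs, op, rhs)                                   \
            do {                                                        \
                    uint64_t l = (uint64_t)(lhs);                       \
                    uint64_t r = (uint64_t)(rhs);                       \
                    if (!(l op r)) {                                    \
                            fprintf(stderr, "%s:%d: %s %s %s failed "   \
                                "(%llu %s %llu)\n", __FILE__, __LINE__, \
                                #lhs, #op, #rhs, (unsigned long long)l, \
                                #op, (unsigned long long)r);            \
                            abort();                                    \
                    }                                                   \
            } while (0)

    /* EX_ASSERT0(x) is shorthand for EX_VERIFY3U(x, ==, 0). */
    #define EX_ASSERT0(x)   EX_VERIFY3U((x), ==, 0)

    int
    main(void)
    {
            int error = 0;

            EX_ASSERT0(error);                      /* passes */
            EX_VERIFY3U(8, >=, sizeof (uint64_t));  /* passes */
            return (0);
    }

[The stringized operands in the failure message are why the triplet form is preferred over a plain assert(): a tripped VERIFY reports both values, not just the expression text.]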