X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fzfs_znode.c;h=137a0681044a85744e7b9c11f3ae9c781c4c497a;hb=9ee7fac5312f9c6b33155aed77e50b0dfd68a77d;hp=4f6185fb53eb419aefaa6616035e694efa2a3676;hpb=c28b227942b421ebdc03c9df9a012642fb517223;p=zfs.git
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 4f6185f..137a068 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -160,189 +160,6 @@ zfs_znode_cache_destructor(void *buf, void *arg)
 	ASSERT(zp->z_acl_cached == NULL);
 }
 
-#ifdef ZNODE_STATS
-static struct {
-	uint64_t zms_zfsvfs_invalid;
-	uint64_t zms_zfsvfs_recheck1;
-	uint64_t zms_zfsvfs_unmounted;
-	uint64_t zms_zfsvfs_recheck2;
-	uint64_t zms_obj_held;
-	uint64_t zms_vnode_locked;
-	uint64_t zms_not_only_dnlc;
-} znode_move_stats;
-#endif	/* ZNODE_STATS */
-
-static void
-zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
-{
-	vnode_t *vp;
-
-	/* Copy fields. */
-	nzp->z_zfsvfs = ozp->z_zfsvfs;
-
-	/* Swap vnodes. */
-	vp = nzp->z_vnode;
-	nzp->z_vnode = ozp->z_vnode;
-	ozp->z_vnode = vp;	/* let destructor free the overwritten vnode */
-	ZTOV(ozp)->v_data = ozp;
-	ZTOV(nzp)->v_data = nzp;
-
-	nzp->z_id = ozp->z_id;
-	ASSERT(ozp->z_dirlocks == NULL);	/* znode not in use */
-	ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
-	nzp->z_unlinked = ozp->z_unlinked;
-	nzp->z_atime_dirty = ozp->z_atime_dirty;
-	nzp->z_zn_prefetch = ozp->z_zn_prefetch;
-	nzp->z_blksz = ozp->z_blksz;
-	nzp->z_seq = ozp->z_seq;
-	nzp->z_mapcnt = ozp->z_mapcnt;
-	nzp->z_gen = ozp->z_gen;
-	nzp->z_sync_cnt = ozp->z_sync_cnt;
-	nzp->z_is_sa = ozp->z_is_sa;
-	nzp->z_sa_hdl = ozp->z_sa_hdl;
-	bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
-	nzp->z_links = ozp->z_links;
-	nzp->z_size = ozp->z_size;
-	nzp->z_pflags = ozp->z_pflags;
-	nzp->z_uid = ozp->z_uid;
-	nzp->z_gid = ozp->z_gid;
-	nzp->z_mode = ozp->z_mode;
-
-	/*
-	 * Since this is just an idle znode and kmem is already dealing with
-	 * memory pressure, release any cached ACL.
-	 */
-	if (ozp->z_acl_cached) {
-		zfs_acl_free(ozp->z_acl_cached);
-		ozp->z_acl_cached = NULL;
-	}
-
-	sa_set_userp(nzp->z_sa_hdl, nzp);
-
-	/*
-	 * Invalidate the original znode by clearing fields that provide a
-	 * pointer back to the znode. Set the low bit of the vfs pointer to
-	 * ensure that zfs_znode_move() recognizes the znode as invalid in any
-	 * subsequent callback.
-	 */
-	ozp->z_sa_hdl = NULL;
-	POINTER_INVALIDATE(&ozp->z_zfsvfs);
-
-	/*
-	 * Mark the znode.
-	 */
-	nzp->z_moved = 1;
-	ozp->z_moved = (uint8_t)-1;
-}
-
-/*ARGSUSED*/
-static kmem_cbrc_t
-zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
-{
-	znode_t *ozp = buf, *nzp = newbuf;
-	zfsvfs_t *zfsvfs;
-	vnode_t *vp;
-
-	/*
-	 * The znode is on the file system's list of known znodes if the vfs
-	 * pointer is valid. We set the low bit of the vfs pointer when freeing
-	 * the znode to invalidate it, and the memory patterns written by kmem
-	 * (baddcafe and deadbeef) set at least one of the two low bits. A newly
-	 * created znode sets the vfs pointer last of all to indicate that the
-	 * znode is known and in a valid state to be moved by this function.
-	 */
-	zfsvfs = ozp->z_zfsvfs;
-	if (!POINTER_IS_VALID(zfsvfs)) {
-		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
-		return (KMEM_CBRC_DONT_KNOW);
-	}
-
-	/*
-	 * Close a small window in which it's possible that the filesystem could
-	 * be unmounted and freed, and zfsvfs, though valid in the previous
-	 * statement, could point to unrelated memory by the time we try to
-	 * prevent the filesystem from being unmounted.
-	 */
-	rw_enter(&zfsvfs_lock, RW_WRITER);
-	if (zfsvfs != ozp->z_zfsvfs) {
-		rw_exit(&zfsvfs_lock);
-		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
-		return (KMEM_CBRC_DONT_KNOW);
-	}
-
-	/*
-	 * If the znode is still valid, then so is the file system. We know that
-	 * no valid file system can be freed while we hold zfsvfs_lock, so we
-	 * can safely ensure that the filesystem is not and will not be
-	 * unmounted. The next statement is equivalent to ZFS_ENTER().
-	 */
-	rrw_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
-	if (zfsvfs->z_unmounted) {
-		ZFS_EXIT(zfsvfs);
-		rw_exit(&zfsvfs_lock);
-		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
-		return (KMEM_CBRC_DONT_KNOW);
-	}
-	rw_exit(&zfsvfs_lock);
-
-	mutex_enter(&zfsvfs->z_znodes_lock);
-	/*
-	 * Recheck the vfs pointer in case the znode was removed just before
-	 * acquiring the lock.
-	 */
-	if (zfsvfs != ozp->z_zfsvfs) {
-		mutex_exit(&zfsvfs->z_znodes_lock);
-		ZFS_EXIT(zfsvfs);
-		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
-		return (KMEM_CBRC_DONT_KNOW);
-	}
-
-	/*
-	 * At this point we know that as long as we hold z_znodes_lock, the
-	 * znode cannot be freed and fields within the znode can be safely
-	 * accessed. Now, prevent a race with zfs_zget().
-	 */
-	if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
-		mutex_exit(&zfsvfs->z_znodes_lock);
-		ZFS_EXIT(zfsvfs);
-		ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
-		return (KMEM_CBRC_LATER);
-	}
-
-	vp = ZTOV(ozp);
-	if (mutex_tryenter(&vp->v_lock) == 0) {
-		ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
-		mutex_exit(&zfsvfs->z_znodes_lock);
-		ZFS_EXIT(zfsvfs);
-		ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
-		return (KMEM_CBRC_LATER);
-	}
-
-	/* Only move znodes that are referenced _only_ by the DNLC. */
-	if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
-		mutex_exit(&vp->v_lock);
-		ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
-		mutex_exit(&zfsvfs->z_znodes_lock);
-		ZFS_EXIT(zfsvfs);
-		ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
-		return (KMEM_CBRC_LATER);
-	}
-
-	/*
-	 * The znode is known and in a valid state to move. We're holding the
-	 * locks needed to execute the critical section.
- */ - zfs_znode_move_impl(ozp, nzp); - mutex_exit(&vp->v_lock); - ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id); - - list_link_replace(&ozp->z_link_node, &nzp->z_link_node); - mutex_exit(&zfsvfs->z_znodes_lock); - ZFS_EXIT(zfsvfs); - - return (KMEM_CBRC_YES); -} - void zfs_znode_init(void) { @@ -354,7 +171,6 @@ zfs_znode_init(void) znode_cache = kmem_cache_create("zfs_znode_cache", sizeof (znode_t), 0, zfs_znode_cache_constructor, zfs_znode_cache_destructor, NULL, NULL, NULL, 0); - kmem_cache_set_move(znode_cache, zfs_znode_move); } void @@ -469,6 +285,7 @@ zfs_create_op_tables() int zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx) { +#ifdef HAVE_SHARE zfs_acl_ids_t acl_ids; vattr_t vattr; znode_t *sharezp; @@ -510,6 +327,9 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx) kmem_cache_free(znode_cache, sharezp); return (error); +#else + return (0); +#endif /* HAVE_SHARE */ } /* @@ -773,7 +593,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, int bonuslen; sa_handle_t *sa_hdl; dmu_object_type_t obj_type; - sa_bulk_attr_t sa_attrs[ZPL_END]; + sa_bulk_attr_t *sa_attrs; int cnt = 0; zfs_acl_locator_cb_t locate = { 0 }; @@ -899,6 +719,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, * order for DMU_OT_ZNODE is critical since it needs to be constructed * in the old znode_phys_t format. Don't change this ordering */ + sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_SLEEP); if (obj_type == DMU_OT_ZNODE) { SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs), @@ -1000,8 +821,9 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, if (obj_type == DMU_OT_ZNODE || acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) { err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx); - ASSERT3P(err, ==, 0); + ASSERT3S(err, ==, 0); } + kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * ZPL_END); ZFS_OBJ_HOLD_EXIT(zfsvfs, obj); }
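
A note on the first hunk: it removes zfs_znode_move()/zfs_znode_move_impl() together with the kmem_cache_set_move() registration in zfs_znode_init(), presumably because the Linux port has no equivalent of the Solaris kmem cache-defragmentation move callback. For reference, the deleted callback follows the usual kmem move-callback contract. Below is a heavily compressed, illustrative sketch of that contract; it is not part of the patch, the function name is made up, it reuses only identifiers that appear in the removed code, and it omits the zfsvfs_lock/z_teardown_lock/z_znodes_lock sequence the real function takes before it trusts the znode.

/*ARGSUSED*/
static kmem_cbrc_t
znode_move_sketch(void *buf, void *newbuf, size_t size, void *arg)
{
	znode_t *ozp = buf, *nzp = newbuf;
	vnode_t *vp;

	/*
	 * An invalid back-pointer (low bit set by POINTER_INVALIDATE(), or a
	 * kmem debug pattern) means this buffer is not a live, registered
	 * znode, so the callback cannot say anything about it.
	 */
	if (!POINTER_IS_VALID(ozp->z_zfsvfs))
		return (KMEM_CBRC_DONT_KNOW);

	/* Any lock that cannot be taken without blocking defers the move. */
	vp = ZTOV(ozp);
	if (mutex_tryenter(&vp->v_lock) == 0)
		return (KMEM_CBRC_LATER);

	/* Otherwise copy the fields, swap the vnodes, and report success. */
	zfs_znode_move_impl(ozp, nzp);
	mutex_exit(&vp->v_lock);
	return (KMEM_CBRC_YES);
}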
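
On the zfs_mknode() hunks: the on-stack sa_bulk_attr_t sa_attrs[ZPL_END] array becomes a heap allocation, presumably to keep the function's kernel-stack footprint small on Linux, and ASSERT3P(err, ==, 0) becomes ASSERT3S() because err is a plain int rather than a pointer. A minimal sketch of the allocate/populate/free pattern the hunk introduces, using only the kmem_alloc()/kmem_free() calls visible above (the wrapper function is hypothetical and assumes the headers already included by zfs_znode.c):

static void
zfs_sa_attrs_pattern_sketch(void)
{
	sa_bulk_attr_t *sa_attrs;

	/* Heap allocation replaces the old sa_attrs[ZPL_END] stack array. */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	/* ... SA_ADD_BULK_ATTR(sa_attrs, cnt, ...) calls, as in the hunk ... */

	/* Release with the same size once the attributes have been consumed. */
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
}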