X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fzfs_znode.c;h=f9ea2a0eb7b55a0aede88071498ee7535188e398;hb=0d3ac5e7356d29fbb7d2880c0a0c457656355ca0;hp=0bb9c09e57ad9fa445dcb995550326783f57df64;hpb=5484965ab650a56871a62a3373859a7a36ddafcb;p=zfs.git

diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 0bb9c09..f9ea2a0 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -174,7 +174,7 @@ zfs_create_share_dir(zfs_sb_t *zsb, dmu_tx_t *tx)
 	vattr.va_uid = crgetuid(kcred);
 	vattr.va_gid = crgetgid(kcred);
 
-	sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+	sharezp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
 	sharezp->z_moved = 0;
 	sharezp->z_unlinked = 0;
 	sharezp->z_atime_dirty = 0;
@@ -248,7 +248,7 @@ zfs_inode_alloc(struct super_block *sb, struct inode **ip)
 {
 	znode_t *zp;
 
-	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+	zp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
 	*ip = ZTOI(zp);
 
 	return (0);
@@ -322,7 +322,8 @@ zfs_inode_set_ops(zfs_sb_t *zsb, struct inode *ip)
  */
 static znode_t *
 zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
-    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
+    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
+    struct dentry *dentry)
 {
 	znode_t *zp;
 	struct inode *ip;
@@ -347,6 +348,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
 	zp->z_blksz = blksz;
 	zp->z_seq = 0x7A4653;
 	zp->z_sync_cnt = 0;
+	zp->z_is_zvol = 0;
 
 	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
 
@@ -378,6 +380,9 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
 	if (insert_inode_locked(ip))
 		goto error;
 
+	if (dentry)
+		d_instantiate(dentry, ip);
+
 	mutex_enter(&zsb->z_znodes_lock);
 	list_insert_tail(&zsb->z_all_znodes, zp);
 	membar_producer();
@@ -674,7 +679,8 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 	VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
 
 	if (!(flag & IS_ROOT_NODE)) {
-		*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl);
+		*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
+		    vap->va_dentry);
 		ASSERT(*zpp != NULL);
 		ASSERT(dzp != NULL);
 		err = zpl_xattr_security_init(ZTOI(*zpp), ZTOI(dzp));
@@ -865,7 +871,7 @@ zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
 	 * bonus buffer.
 	 */
 	zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
-	    doi.doi_bonus_type, obj_num, NULL);
+	    doi.doi_bonus_type, obj_num, NULL, NULL);
 	if (zp == NULL) {
 		err = ENOENT;
 	} else {
@@ -979,13 +985,24 @@ zfs_zinactive(znode_t *zp)
 {
 	zfs_sb_t *zsb = ZTOZSB(zp);
 	uint64_t z_id = zp->z_id;
+	boolean_t drop_mutex = 0;
 
 	ASSERT(zp->z_sa_hdl);
 
 	/*
-	 * Don't allow a zfs_zget() while were trying to release this znode
+	 * Don't allow a zfs_zget() while were trying to release this znode.
+	 *
+	 * Linux allows direct memory reclaim which means that any KM_SLEEP
+	 * allocation may trigger inode eviction. This can lead to a deadlock
+	 * through the ->shrink_icache_memory()->evict()->zfs_inactive()->
+	 * zfs_zinactive() call path. To avoid this deadlock the process
+	 * must not reacquire the mutex when it is already holding it.
 	 */
-	ZFS_OBJ_HOLD_ENTER(zsb, z_id);
+	if (!ZFS_OBJ_HOLD_OWNED(zsb, z_id)) {
+		ZFS_OBJ_HOLD_ENTER(zsb, z_id);
+		drop_mutex = 1;
+	}
+
 	mutex_enter(&zp->z_lock);
 
 	/*
@@ -994,14 +1011,19 @@ zfs_zinactive(znode_t *zp)
 	 */
 	if (zp->z_unlinked) {
 		mutex_exit(&zp->z_lock);
-		ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
+		if (drop_mutex)
+			ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
 		zfs_rmnode(zp);
 		return;
 	}
 
 	mutex_exit(&zp->z_lock);
 	zfs_znode_dmu_fini(zp);
-	ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
+	if (drop_mutex)
+		ZFS_OBJ_HOLD_EXIT(zsb, z_id);
 }
 
 void
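
The zfs_zinactive() hunks above reduce to one locking idiom: take the per-object hold only if the current thread does not already own it, and drop it on exit only if it was taken here, so that eviction triggered from a reclaim path that already owns the hold cannot deadlock on reacquisition. The sketch below illustrates that idiom in plain userspace C. It is not ZFS code: obj_hold_t and the hold_owned()/hold_enter()/hold_exit() helpers are hypothetical stand-ins for ZFS_OBJ_HOLD_OWNED()/ZFS_OBJ_HOLD_ENTER()/ZFS_OBJ_HOLD_EXIT().

/*
 * Illustrative sketch only (not ZFS code): the "enter the hold only if
 * not already owned, and drop it only if we took it" pattern used by
 * zfs_zinactive() above.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct obj_hold {
	pthread_mutex_t	lock;
	pthread_t	owner;
	bool		owned;
} obj_hold_t;

/*
 * Does the calling thread already own this hold?  Only the owning thread
 * can observe owned == true with owner == self, so no extra locking is
 * needed for this check (mirroring mutex_owned()-style semantics).
 */
static bool
hold_owned(obj_hold_t *h)
{
	return (h->owned && pthread_equal(h->owner, pthread_self()));
}

static void
hold_enter(obj_hold_t *h)
{
	pthread_mutex_lock(&h->lock);
	h->owner = pthread_self();
	h->owned = true;
}

static void
hold_exit(obj_hold_t *h)
{
	h->owned = false;
	pthread_mutex_unlock(&h->lock);
}

/*
 * Mirrors the zfs_zinactive() change: when called from a context that
 * already owns the hold (e.g. eviction entered while the hold is held),
 * do not reacquire it -- and remember not to drop it either.
 */
static void
release_object(obj_hold_t *h)
{
	bool drop = false;

	if (!hold_owned(h)) {
		hold_enter(h);
		drop = true;
	}

	/* ... tear the object down while the hold is owned ... */

	if (drop)
		hold_exit(h);
}

int
main(void)
{
	obj_hold_t h = { .lock = PTHREAD_MUTEX_INITIALIZER, .owned = false };

	/* Outer caller already owns the hold; release_object() must not block. */
	hold_enter(&h);
	release_object(&h);
	hold_exit(&h);

	printf("no deadlock: hold was not reacquired\n");
	return (0);
}

The KM_SLEEP to KM_PUSHPAGE switches earlier in the diff appear to serve the same goal described in the added comment: keeping these znode allocations from entering direct reclaim and re-entering the filesystem through the eviction path.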