vattr.va_uid = crgetuid(kcred);
vattr.va_gid = crgetgid(kcred);
- sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ sharezp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
sharezp->z_moved = 0;
sharezp->z_unlinked = 0;
sharezp->z_atime_dirty = 0;
{
znode_t *zp;
- zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
+ zp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
*ip = ZTOI(zp);
return (0);
*/
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
- dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl)
+ dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
+ struct dentry *dentry)
{
znode_t *zp;
struct inode *ip;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
+ zp->z_is_zvol = 0;
zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);
if (insert_inode_locked(ip))
goto error;
+ if (dentry)
+ d_instantiate(dentry, ip);
+
mutex_enter(&zsb->z_znodes_lock);
list_insert_tail(&zsb->z_all_znodes, zp);
membar_producer();
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
- *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl);
+ *zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
+ vap->va_dentry);
ASSERT(*zpp != NULL);
ASSERT(dzp != NULL);
err = zpl_xattr_security_init(ZTOI(*zpp), ZTOI(dzp));
ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
+/*
+ * zfs_xvattr_set only updates the in-core attributes;
+ * it is assumed the caller will be doing an sa_bulk_update
+ * to push the changes out.
+ */
+void
+zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
+{
+ xoptattr_t *xoap;
+
+ xoap = xva_getxoptattr(xvap);
+ ASSERT(xoap);
+
+ if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
+ uint64_t times[2];
+ ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
+ &times, sizeof (times), tx);
+ XVA_SET_RTN(xvap, XAT_CREATETIME);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
+ ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_READONLY);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
+ ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_HIDDEN);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
+ ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_SYSTEM);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
+ ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_ARCHIVE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
+ ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_IMMUTABLE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
+ ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_NOUNLINK);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
+ ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_APPENDONLY);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
+ ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_NODUMP);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
+ ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_OPAQUE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
+ ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
+ xoap->xoa_av_quarantined, zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
+ ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
+ zfs_sa_set_scanstamp(zp, xvap, tx);
+ XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
+ ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_REPARSE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
+ ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_OFFLINE);
+ }
+ if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
+ ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
+ zp->z_pflags, tx);
+ XVA_SET_RTN(xvap, XAT_SPARSE);
+ }
+}
+
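For reference, a minimal sketch of the calling pattern the comment above assumes: the caller fills in an xvattr, calls zfs_xvattr_set() under an assigned transaction, then pushes the updated flag word out through sa_bulk_update(). The helper name set_dos_flags() and the simplified tx/error handling are hypothetical; the real caller (zfs_setattr()) batches many more attributes into a single bulk update.

/*
 * Hypothetical caller: set the DOS readonly/hidden bits on a znode.
 * Only the zfs_xvattr_set()/sa_bulk_update() ordering matters here.
 */
static int
set_dos_flags(znode_t *zp, boolean_t ro, boolean_t hidden)
{
	zfs_sb_t	*zsb = ZTOZSB(zp);
	xvattr_t	xva;
	xoptattr_t	*xoap;
	sa_bulk_attr_t	bulk[1];
	int		cnt = 0;
	dmu_tx_t	*tx;
	int		err;

	xva_init(&xva);
	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_READONLY);
	xoap->xoa_readonly = ro;
	XVA_SET_REQ(&xva, XAT_HIDDEN);
	xoap->xoa_hidden = hidden;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	if ((err = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* Update the in-core flags; z_pflags is changed via ZFS_ATTR_SET. */
	zfs_xvattr_set(zp, &xva, tx);

	/* Push the new flag word out through the SA layer. */
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);

	dmu_tx_commit(tx);
	return (err);
}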
int
zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
* bonus buffer.
*/
zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
- doi.doi_bonus_type, obj_num, NULL);
+ doi.doi_bonus_type, obj_num, NULL, NULL);
if (zp == NULL) {
err = ENOENT;
} else {
{
zfs_sb_t *zsb = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
+ boolean_t drop_mutex = 0;
ASSERT(zp->z_sa_hdl);
/*
- * Don't allow a zfs_zget() while were trying to release this znode
+ * Don't allow a zfs_zget() while we're trying to release this znode.
+ *
+ * Linux allows direct memory reclaim which means that any KM_SLEEP
+ * allocation may trigger inode eviction. This can lead to a deadlock
+ * through the ->shrink_icache_memory()->evict()->zfs_inactive()->
+ * zfs_zinactive() call path. To avoid this deadlock the process
+ * must not reacquire the mutex when it is already holding it.
*/
- ZFS_OBJ_HOLD_ENTER(zsb, z_id);
+ if (!ZFS_OBJ_HOLD_OWNED(zsb, z_id)) {
+ ZFS_OBJ_HOLD_ENTER(zsb, z_id);
+ drop_mutex = 1;
+ }
+
mutex_enter(&zp->z_lock);
/*
*/
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
- ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
+ if (drop_mutex)
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
zfs_rmnode(zp);
return;
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
- ZFS_OBJ_HOLD_EXIT(zsb, z_id);
+
+ if (drop_mutex)
+ ZFS_OBJ_HOLD_EXIT(zsb, z_id);
}
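The drop_mutex handling above is an instance of a general re-entrancy pattern: take the lock only if this thread does not already own it, and release only what was acquired here. A standalone sketch with a hypothetical helper and lock (the real code tests ownership with the ZFS_OBJ_HOLD_OWNED macro on the per-object mutex):

/*
 * Hypothetical helper: do work under `lock`, but tolerate being called
 * from a context (e.g. direct reclaim) that already owns it.
 */
static void
do_work_locked(kmutex_t *lock)
{
	boolean_t drop = B_FALSE;

	/* Only take the lock if this thread does not already hold it. */
	if (!mutex_owned(lock)) {
		mutex_enter(lock);
		drop = B_TRUE;
	}

	/* ... critical section ... */

	/* Release only what we acquired; the outer owner keeps its hold. */
	if (drop)
		mutex_exit(lock);
}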
void