};
#ifdef _KERNEL
-
int
zfs_sa_readlink(znode_t *zp, uio_t *uio)
{
}
}
+#ifdef HAVE_SCANSTAMP
void
zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
xoptattr_t *xoap;
+ ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa) {
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
xoptattr_t *xoap;
+ ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa)
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
&zp->z_pflags, sizeof (uint64_t), tx));
}
}
+#endif /* HAVE_SCANSTAMP */
/*
* I'm not convinced we should do any of this upgrade.
dmu_buf_t *db = sa_get_db(hdl);
znode_t *zp = sa_get_userdata(hdl);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- sa_bulk_attr_t bulk[20];
int count = 0;
- sa_bulk_attr_t sa_attrs[20] = { 0 };
+ sa_bulk_attr_t *bulk, *sa_attrs;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t uid, gid, mode, rdev, xattr, parent;
uint64_t crtime[2], mtime[2], ctime[2];
zfs_acl_phys_t znode_acl;
+#ifdef HAVE_SCANSTAMP
char scanstamp[AV_SCANSTAMP_SZ];
+#endif /* HAVE_SCANSTAMP */
+ boolean_t drop_lock = B_FALSE;
/*
* No upgrade if ACL isn't cached
if (zp->z_acl_cached == NULL || ZTOV(zp)->v_type == VLNK)
return;
+ /*
+ * If the z_lock is held and we aren't the owner
+ * then just return, since we don't want to deadlock
+ * trying to update the status of z_is_sa. This
+ * file can then be upgraded at a later time.
+ *
+ * Otherwise, we know we are doing the
+ * sa_update() that caused us to enter this function.
+ */
+ if (mutex_owner(&zp->z_lock) != curthread) {
+ if (mutex_tryenter(&zp->z_lock) == 0)
+ return;
+ else
+ drop_lock = B_TRUE;
+ }
+
/* First do a bulk query of the attributes that aren't cached */
+ bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&znode_acl, 88);
- if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
- return;
-
+ if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) {
+ kmem_free(bulk, sizeof(sa_bulk_attr_t) * 20);
+ goto done;
+ }
/*
* While the order here doesn't matter its best to try and organize
* it is such a way to pick up an already existing layout number
*/
count = 0;
+ sa_attrs = kmem_zalloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
locate.cb_aclp = zp->z_acl_cached;
SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);
+
if (xattr)
- SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs),
- NULL, &rdev, 8);
+ SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
+ NULL, &xattr, 8);
+#ifdef HAVE_SCANSTAMP
/* if scanstamp then add scanstamp */
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
NULL, scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
}
+#endif /* HAVE_SCANSTAMP */
VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
znode_acl.z_acl_extern_obj, tx));
zp->z_is_sa = B_TRUE;
+ kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * 20);
+ kmem_free(bulk, sizeof(sa_bulk_attr_t) * 20);
+done:
+ if (drop_lock)
+ mutex_exit(&zp->z_lock);
}
void
if (!zp->z_zfsvfs->z_use_sa || zp->z_is_sa)
return;
- ASSERT(!zp->z_is_sa);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
- if (ZFS_EXTERNAL_ACL(zp)) {
- dmu_tx_hold_free(tx, ZFS_EXTERNAL_ACL(zp), 0,
+ if (zfs_external_acl(zp)) {
+ dmu_tx_hold_free(tx, zfs_external_acl(zp), 0,
DMU_OBJECT_END);
}
}