* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
+#ifdef HAVE_ZPL
+
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
-
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
+ int error;
if (zp->z_is_sa)
return (0);
- VERIFY(0 == sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
- &acl_phys, sizeof (acl_phys)));
+ /*
+ * Need to deal with a potential
+ * race where zfs_sa_upgrade could cause
+ * z_is_sa to change.
+ *
+ * If the lookup fails then the state of z_is_sa should have
+ * changed.
+ */
- return (acl_phys.z_acl_extern_obj);
+ if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
+ &acl_phys, sizeof (acl_phys))) == 0)
+ return (acl_phys.z_acl_extern_obj);
+ else {
+ /*
+ * After upgrade the SA_ZPL_ZNODE_ACL should have been
+ * removed.
+ */
+ VERIFY(zp->z_is_sa && error == ENOENT);
+ return (0);
+ }
}
/*
int size;
int error;
+ ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
{
zfs_acl_phys_t acl_phys;
- if (zp->z_is_sa) {
+ if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
- } else {
- VERIFY(0 == sa_lookup(zp->z_sa_hdl,
+ else {
+ int error;
+
+ /*
+ * Need to deal with a potential
+ * race where zfs_sa_upgrade could cause
+ * z_is_sa to change.
+ *
+ * If the lookup fails then the state of z_is_sa should have
+ * changed.
+ */
+ if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
- &acl_phys, sizeof (acl_phys)));
- return (acl_phys.z_acl_version);
+ &acl_phys, sizeof (acl_phys))) == 0)
+ return (acl_phys.z_acl_version);
+ else {
+ /*
+ * After upgrade SA_ZPL_ZNODE_ACL should have
+ * been removed.
+ */
+ VERIFY(zp->z_is_sa && error == ENOENT);
+ return (ZFS_ACL_VERSION_FUID);
+ }
}
}
{
zfs_acl_node_t *aclnode;
- while (aclnode = list_head(&aclp->z_acl)) {
+ while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
size_t ace_size;
uint16_t entry_type;
- while (zacep = zfs_acl_next_ace(aclp, zacep,
- &who, &access_mask, &iflags, &type)) {
+ while ((zacep = zfs_acl_next_ace(aclp, zacep,
+ &who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
- while (cookie = zfs_acl_next_ace(aclp, cookie, &who,
- &access_mask, &iflags, &type)) {
+ while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
+ &access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
- while (acep = zfs_acl_next_ace(aclp, acep, &who,
- &access_mask, &iflags, &type)) {
+ while ((acep = zfs_acl_next_ace(aclp, acep, &who,
+ &access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
* create a new acl and leave any cached acl in place.
*/
static int
-zfs_acl_node_read(znode_t *zp, zfs_acl_t **aclpp, boolean_t will_modify)
+zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
+ boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize;
zfs_acl_phys_t znode_acl;
int version;
int error;
+ boolean_t drop_lock = B_FALSE;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
return (0);
}
- version = ZNODE_ACL_VERSION(zp);
+ /*
+ * Close the race where the znode could be upgraded while trying to
+ * read the znode attributes.
+ *
+ * But this could only happen if the file isn't already an SA
+ * znode
+ */
+ if (!zp->z_is_sa && !have_lock) {
+ mutex_enter(&zp->z_lock);
+ drop_lock = B_TRUE;
+ }
+ version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
- &acl_count, &znode_acl)) != 0)
- return (error);
+ &acl_count, &znode_acl)) != 0) {
+ goto done;
+ }
aclp = zfs_acl_alloc(version);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
- return (error);
+ goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
- return (0);
+done:
+ if (drop_lock)
+ mutex_exit(&zp->z_lock);
+ return (error);
}
/*ARGSUSED*/
*length = cb->cb_acl_node->z_size;
}
-
-static int
-zfs_acl_get_owner_fuids(znode_t *zp, uint64_t *fuid, uint64_t *fgid)
-{
- int count = 0;
- sa_bulk_attr_t bulk[2];
- int error;
-
- if (IS_EPHEMERAL(zp->z_uid) || IS_EPHEMERAL(zp->z_gid)) {
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zp->z_zfsvfs), NULL,
- &fuid, sizeof (fuid));
- SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zp->z_zfsvfs), NULL,
- &fgid, sizeof (fuid));
- if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
- return (error);
- }
- } else {
- *fuid = zp->z_uid;
- *fgid = zp->z_gid;
- }
- return (0);
-}
-
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
- uint64_t fuid, fgid;
- if ((error = zfs_acl_get_owner_fuids(zp, &fuid, &fgid)) != 0)
- return (error);
+ ASSERT(MUTEX_HELD(&zp->z_lock));
+ ASSERT(MUTEX_HELD(&zp->z_acl_lock));
- mutex_enter(&zp->z_acl_lock);
- if ((error = zfs_acl_node_read(zp, &aclp, B_FALSE)) == 0)
+ if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0)
zp->z_mode = zfs_mode_compute(zp->z_mode, aclp,
- &zp->z_pflags, fuid, fgid);
- mutex_exit(&zp->z_acl_lock);
+ &zp->z_pflags, zp->z_uid, zp->z_gid);
return (error);
}
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
- uint64_t fuid, fgid;
mode = zp->z_mode;
- if ((error = zfs_acl_get_owner_fuids(zp, &fuid, &fgid)) != 0)
- return (error);
-
- mode = zfs_mode_compute(mode, aclp, &zp->z_pflags, fuid, fgid);
+ mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
+ zp->z_uid, zp->z_gid);
zp->z_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
new_bytes += abstract_size;
}
- while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
- &iflags, &type)) {
+ while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
+ &iflags, &type))) {
uint16_t inherit_flags;
entry_type = (iflags & ACE_TYPE_FLAGS);
list_insert_tail(&aclp->z_acl, newnode);
}
-int
+void
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
- mutex_enter(&zp->z_lock);
mutex_enter(&zp->z_acl_lock);
+ mutex_enter(&zp->z_lock);
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(zp->z_zfsvfs, mode, *aclp);
- mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
+ mutex_exit(&zp->z_acl_lock);
ASSERT(*aclp);
- return (0);
}
/*
aclp = zfs_acl_alloc(paclp->z_version);
if (zfsvfs->z_acl_inherit == ZFS_ACL_DISCARD || vtype == VLNK)
return (aclp);
- while (pacep = zfs_acl_next_ace(paclp, pacep, &who,
- &access_mask, &iflags, &type)) {
+ while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
+ &access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
gid_t gid;
boolean_t need_chmod = B_TRUE;
boolean_t inherited = B_FALSE;
- uint64_t parentgid;
bzero(acl_ids, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
- if (IS_EPHEMERAL(dzp->z_gid))
- VERIFY(0 == sa_lookup(dzp->z_sa_hdl, SA_ZPL_GID(zfsvfs),
- &parentgid, sizeof (parentgid)));
- else
- parentgid = (uint64_t)dzp->z_gid;
-
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
- if (acl_ids->z_fgid != parentgid &&
+ if (acl_ids->z_fgid != dzp->z_gid &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
char *domain;
uint32_t rid;
- acl_ids->z_fgid = parentgid;
+ acl_ids->z_fgid = dzp->z_gid;
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
}
if (acl_ids->z_aclp == NULL) {
+ mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
if (!(flag & IS_ROOT_NODE) && (ZTOV(dzp)->v_type == VDIR &&
(dzp->z_pflags & ZFS_INHERIT_ACE)) &&
!(dzp->z_pflags & ZFS_XATTR)) {
- mutex_enter(&dzp->z_acl_lock);
- VERIFY(0 == zfs_acl_node_read(dzp, &paclp, B_FALSE));
+ VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
+ &paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
- mutex_exit(&dzp->z_acl_lock);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_lock);
+ mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
acl_ids->z_aclp->z_hints |= (vap->va_type == VDIR) ?
ZFS_ACL_AUTO_INHERIT : 0;
if (mask == 0)
return (ENOSYS);
- if (error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr))
+ if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
- error = zfs_acl_node_read(zp, &aclp, B_FALSE);
+ error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
uint32_t access_mask;
uint16_t type, iflags;
- while (zacep = zfs_acl_next_ace(aclp, zacep,
- &who, &access_mask, &iflags, &type)) {
+ while ((zacep = zfs_acl_next_ace(aclp, zacep,
+ &who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
+ uint64_t acl_obj;
if (mask == 0)
return (ENOSYS);
if (zp->z_pflags & ZFS_IMMUTABLE)
return (EPERM);
- if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))
+ if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
- mutex_enter(&zp->z_lock);
mutex_enter(&zp->z_acl_lock);
+ mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zfsvfs->z_os);
* upgrading then take out necessary DMU holds
*/
- if (ZFS_EXTERNAL_ACL(zp)) {
- if (zfsvfs->z_version <= ZPL_VERSION_SA &&
- ZNODE_ACL_VERSION(zp) <= ZFS_ACL_VERSION_INITIAL) {
- dmu_tx_hold_free(tx, ZFS_EXTERNAL_ACL(zp), 0,
+ if ((acl_obj = zfs_external_acl(zp)) != 0) {
+ if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
+ zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
+ dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
+ dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
+ aclp->z_acl_bytes);
} else {
- dmu_tx_hold_write(tx, ZFS_EXTERNAL_ACL(zp),
- 0, aclp->z_acl_bytes);
+ dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT(error == 0);
+ ASSERT(zp->z_acl_cached == NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
done:
- mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
+ mutex_exit(&zp->z_acl_lock);
return (error);
}
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
- uint64_t gowner;
+ uid_t gowner;
+ uid_t fowner;
+
+ zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
- error = zfs_acl_node_read(zp, &aclp, B_FALSE);
+ error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
ASSERT(zp->z_acl_cached);
- if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GID(zfsvfs),
- &gowner, sizeof (gowner))) != 0) {
- mutex_exit(&zp->z_acl_lock);
- return (error);
- }
-
- while (acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
- &iflags, &type)) {
+ while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
+ &iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
switch (entry_type) {
case ACE_OWNER:
- if (uid == zp->z_uid)
+ if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
- return (secpolicy_vnode_any_access(cr, ZTOV(zp),
- zp->z_uid) == 0);
+ uid_t owner;
+
+ owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
+ return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0);
}
return (B_TRUE);
}
return (0);
}
- if (IS_EPHEMERAL(zdp->z_uid) != 0 || IS_EPHEMERAL(zdp->z_gid) != 0) {
+ if (FUID_INDEX(zdp->z_uid) != 0 || FUID_INDEX(zdp->z_gid) != 0) {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
znode_t *xzp;
znode_t *check_zp = zp;
mode_t needed_bits;
+ uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR));
}
}
+ owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
/*
* Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC
* in needed_bits. Map the bits mapped by working_mode (currently
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
- zp->z_uid == crgetuid(cr))
+ owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
VN_RELE(ZTOV(xzp));
- return (secpolicy_vnode_access2(cr, ZTOV(zp), zp->z_uid,
+ return (secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits));
}
ASSERT(working_mode != 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
- zp->z_uid == crgetuid(cr)))
+ owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
if (working_mode & ACE_EXECUTE)
checkmode |= VEXEC;
- error = secpolicy_vnode_access2(cr, ZTOV(check_zp), zp->z_uid,
+ error = secpolicy_vnode_access2(cr, ZTOV(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
- error = secpolicy_vnode_chown(cr, zp->z_uid);
+ error = secpolicy_vnode_chown(cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
- error = secpolicy_vnode_setdac(cr, zp->z_uid);
+ error = secpolicy_vnode_setdac(cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
- error = secpolicy_vnode_chown(cr, zp->z_uid);
+ error = secpolicy_vnode_chown(cr, owner);
}
if (error == 0) {
/*
}
}
} else if (error == 0) {
- error = secpolicy_vnode_access2(cr, ZTOV(zp), zp->z_uid,
+ error = secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits);
}
mode_t available_perms, cred_t *cr)
{
int error;
+ uid_t downer;
+
+ downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER);
error = secpolicy_vnode_access2(cr, ZTOV(dzp),
- dzp->z_uid, available_perms, VWRITE|VEXEC);
+ downer, available_perms, VWRITE|VEXEC);
if (error == 0)
error = zfs_sticky_remove_access(dzp, zp, cr);
* If that succeeds then check for add_file/add_subdir permissions
*/
- if (error = zfs_zaccess_delete(sdzp, szp, cr))
+ if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it?
*/
if (tzp) {
- if (error = zfs_zaccess_delete(tdzp, tzp, cr))
+ if ((error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
}
return (error);
}
+
+#endif /* HAVE_ZPL */