Add -p switch to "zpool get"
diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c
index 7cb5052..debb5f8 100644
--- a/module/zfs/zfs_fuid.c
+++ b/module/zfs/zfs_fuid.c
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
-#include <sys/sunddi.h>
 #include <sys/dmu.h>
 #include <sys/avl.h>
 #include <sys/zap.h>
  * During file system initialization the nvlist(s) are read and
  * two AVL trees are created.  One tree is keyed by the index number
  * and the other by the domain string.  Nodes are never removed from
- * trees, but new entries may be added.  If a new entry is added then the
- * on-disk packed nvlist will also be updated.
+ * trees, but new entries may be added.  If a new entry is added then
+ * the zsb->z_fuid_dirty flag is set to true and the caller will then
+ * be responsible for calling zfs_fuid_sync() to sync the changes to disk.
+ *
  */
 
 #define        FUID_IDX        "fuid_idx"
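
The caller pattern implied by the comment above looks roughly like the
following sketch; the domain string, transaction handling, and error
handling here are illustrative and not part of this change:

	/* Sketch only: "example.domain" is a placeholder. */
	dmu_tx_t *tx;

	(void) zfs_fuid_find_by_domain(zsb, "example.domain", NULL, B_TRUE);

	tx = dmu_tx_create(zsb->z_os);
	if (zsb->z_fuid_dirty)
		zfs_fuid_txhold(zsb, tx);	/* reserve space for the packed nvlist */
	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
		zfs_fuid_sync(zsb, tx);		/* no-op when the table is clean */
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}
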
@@ -97,6 +97,15 @@ domain_compare(const void *arg1, const void *arg2)
        return (val > 0 ? 1 : -1);
 }
 
+void
+zfs_fuid_avl_tree_create(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
+{
+       avl_create(idx_tree, idx_compare,
+           sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
+       avl_create(domain_tree, domain_compare,
+           sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
+}
+
 /*
  * load initial fuid domain and idx trees.  This function is used by
  * both the kernel and zdb.
@@ -108,12 +117,9 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
        dmu_buf_t *db;
        uint64_t fuid_size;
 
-       avl_create(idx_tree, idx_compare,
-           sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
-       avl_create(domain_tree, domain_compare,
-           sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));
-
-       VERIFY(0 == dmu_bonus_hold(os, fuid_obj, FTAG, &db));
+       ASSERT(fuid_obj != 0);
+       VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
+           FTAG, &db));
        fuid_size = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);
 
@@ -125,7 +131,8 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
                int i;
 
                packed = kmem_alloc(fuid_size, KM_SLEEP);
-               VERIFY(dmu_read(os, fuid_obj, 0, fuid_size, packed) == 0);
+               VERIFY(dmu_read(os, fuid_obj, 0,
+                   fuid_size, packed, DMU_READ_PREFETCH) == 0);
                VERIFY(nvlist_unpack(packed, fuid_size,
                    &nvp, 0) == 0);
                VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
@@ -161,12 +168,12 @@ zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
        void *cookie;
 
        cookie = NULL;
-       while (domnode = avl_destroy_nodes(domain_tree, &cookie))
+       while ((domnode = avl_destroy_nodes(domain_tree, &cookie)))
                ksiddomain_rele(domnode->f_ksid);
 
        avl_destroy(domain_tree);
        cookie = NULL;
-       while (domnode = avl_destroy_nodes(idx_tree, &cookie))
+       while ((domnode = avl_destroy_nodes(idx_tree, &cookie)))
                kmem_free(domnode, sizeof (fuid_domain_t));
        avl_destroy(idx_tree);
 }
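
Together, zfs_fuid_avl_tree_create(), zfs_fuid_table_load() and
zfs_fuid_table_destroy() are all a userland consumer such as zdb needs to
inspect the on-disk table.  A hypothetical caller, assuming "os" and
"fuid_obj" were obtained elsewhere:

	avl_tree_t idx_tree, domain_tree;
	uint64_t fuid_size;

	zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
	fuid_size = zfs_fuid_table_load(os, fuid_obj, &idx_tree, &domain_tree);
	(void) printf("packed table size: %llu\n", (u_longlong_t)fuid_size);

	/* indices are assigned starting at 1 */
	for (uint32_t i = 1; i <= avl_numnodes(&idx_tree); i++)
		(void) printf("%u\t%s\n", i, zfs_fuid_idx_domain(&idx_tree, i));

	zfs_fuid_table_destroy(&idx_tree, &domain_tree);
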
@@ -189,52 +196,109 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
  * Load the fuid table(s) into memory.
  */
 static void
-zfs_fuid_init(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
+zfs_fuid_init(zfs_sb_t *zsb)
 {
-       int error = 0;
-
-       rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
+       rw_enter(&zsb->z_fuid_lock, RW_WRITER);
 
-       if (zfsvfs->z_fuid_loaded) {
-               rw_exit(&zfsvfs->z_fuid_lock);
+       if (zsb->z_fuid_loaded) {
+               rw_exit(&zsb->z_fuid_lock);
                return;
        }
 
-       if (zfsvfs->z_fuid_obj == 0) {
+       zfs_fuid_avl_tree_create(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
 
-               /* first make sure we need to allocate object */
+       (void) zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
+           ZFS_FUID_TABLES, 8, 1, &zsb->z_fuid_obj);
+       if (zsb->z_fuid_obj != 0) {
+               zsb->z_fuid_size = zfs_fuid_table_load(zsb->z_os,
+                   zsb->z_fuid_obj, &zsb->z_fuid_idx,
+                   &zsb->z_fuid_domain);
+       }
 
-               error = zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
-                   ZFS_FUID_TABLES, 8, 1, &zfsvfs->z_fuid_obj);
-               if (error == ENOENT && tx != NULL) {
-                       zfsvfs->z_fuid_obj = dmu_object_alloc(zfsvfs->z_os,
-                           DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
-                           sizeof (uint64_t), tx);
-                       VERIFY(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
-                           ZFS_FUID_TABLES, sizeof (uint64_t), 1,
-                           &zfsvfs->z_fuid_obj, tx) == 0);
-               }
+       zsb->z_fuid_loaded = B_TRUE;
+       rw_exit(&zsb->z_fuid_lock);
+}
+
+/*
+ * sync out AVL trees to persistent storage.
+ */
+void
+zfs_fuid_sync(zfs_sb_t *zsb, dmu_tx_t *tx)
+{
+       nvlist_t *nvp;
+       nvlist_t **fuids;
+       size_t nvsize = 0;
+       char *packed;
+       dmu_buf_t *db;
+       fuid_domain_t *domnode;
+       int numnodes;
+       int i;
+
+       if (!zsb->z_fuid_dirty) {
+               return;
+       }
+
+       rw_enter(&zsb->z_fuid_lock, RW_WRITER);
+
+       /*
+        * First see if the table needs to be created.
+        */
+       if (zsb->z_fuid_obj == 0) {
+               zsb->z_fuid_obj = dmu_object_alloc(zsb->z_os,
+                   DMU_OT_FUID, 1 << 14, DMU_OT_FUID_SIZE,
+                   sizeof (uint64_t), tx);
+               VERIFY(zap_add(zsb->z_os, MASTER_NODE_OBJ,
+                   ZFS_FUID_TABLES, sizeof (uint64_t), 1,
+                   &zsb->z_fuid_obj, tx) == 0);
        }
 
-       if (zfsvfs->z_fuid_obj != 0) {
-               zfsvfs->z_fuid_size = zfs_fuid_table_load(zfsvfs->z_os,
-                   zfsvfs->z_fuid_obj, &zfsvfs->z_fuid_idx,
-                   &zfsvfs->z_fuid_domain);
-               zfsvfs->z_fuid_loaded = B_TRUE;
+       VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+
+       numnodes = avl_numnodes(&zsb->z_fuid_idx);
+       fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
+       for (i = 0, domnode = avl_first(&zsb->z_fuid_domain); domnode; i++,
+           domnode = AVL_NEXT(&zsb->z_fuid_domain, domnode)) {
+               VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
+               VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
+                   domnode->f_idx) == 0);
+               VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
+               VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
+                   domnode->f_ksid->kd_name) == 0);
        }
+       VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
+           fuids, numnodes) == 0);
+       for (i = 0; i != numnodes; i++)
+               nvlist_free(fuids[i]);
+       kmem_free(fuids, numnodes * sizeof (void *));
+       VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
+       packed = kmem_alloc(nvsize, KM_SLEEP);
+       VERIFY(nvlist_pack(nvp, &packed, &nvsize,
+           NV_ENCODE_XDR, KM_SLEEP) == 0);
+       nvlist_free(nvp);
+       zsb->z_fuid_size = nvsize;
+       dmu_write(zsb->z_os, zsb->z_fuid_obj, 0, zsb->z_fuid_size, packed, tx);
+       kmem_free(packed, zsb->z_fuid_size);
+       VERIFY(0 == dmu_bonus_hold(zsb->z_os, zsb->z_fuid_obj,
+           FTAG, &db));
+       dmu_buf_will_dirty(db, tx);
+       *(uint64_t *)db->db_data = zsb->z_fuid_size;
+       dmu_buf_rele(db, FTAG);
 
-       rw_exit(&zfsvfs->z_fuid_lock);
+       zsb->z_fuid_dirty = B_FALSE;
+       rw_exit(&zsb->z_fuid_lock);
 }
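
For reference, the nvlist packed and written above (and unpacked again by
zfs_fuid_table_load()) has the following shape; the layout is shown
schematically, reconstructed from the code rather than quoted from any
documentation:

	FUID_NVP_ARRAY = [
		{ FUID_IDX = 1, FUID_OFFSET = 0, FUID_DOMAIN = "<first domain>"  },
		{ FUID_IDX = 2, FUID_OFFSET = 0, FUID_DOMAIN = "<second domain>" },
		...
	]
	/* the bonus buffer of z_fuid_obj holds the size of the XDR-packed nvlist */
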
 
 /*
  * Query domain table for a given domain.
  *
- * If domain isn't found it is added to AVL trees and
- * the results are pushed out to disk.
+ * If domain isn't found and addok is set, it is added to AVL trees and
+ * the zsb->z_fuid_dirty flag will be set to TRUE.  It will then be
+ * necessary for the caller or another thread to detect the dirty table
+ * and sync out the changes.
  */
 int
-zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain,
-    dmu_tx_t *tx)
+zfs_fuid_find_by_domain(zfs_sb_t *zsb, const char *domain,
+    char **retdomain, boolean_t addok)
 {
        fuid_domain_t searchnode, *findnode;
        avl_index_t loc;
@@ -246,37 +310,31 @@ zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain, char **retdomain,
         * for the user nobody.
         */
        if (domain[0] == '\0') {
-               *retdomain = nulldomain;
+               if (retdomain)
+                       *retdomain = nulldomain;
                return (0);
        }
 
        searchnode.f_ksid = ksid_lookupdomain(domain);
-       if (retdomain) {
+       if (retdomain)
                *retdomain = searchnode.f_ksid->kd_name;
-       }
-       if (!zfsvfs->z_fuid_loaded)
-               zfs_fuid_init(zfsvfs, tx);
+       if (!zsb->z_fuid_loaded)
+               zfs_fuid_init(zsb);
 
 retry:
-       rw_enter(&zfsvfs->z_fuid_lock, rw);
-       findnode = avl_find(&zfsvfs->z_fuid_domain, &searchnode, &loc);
+       rw_enter(&zsb->z_fuid_lock, rw);
+       findnode = avl_find(&zsb->z_fuid_domain, &searchnode, &loc);
 
        if (findnode) {
-               rw_exit(&zfsvfs->z_fuid_lock);
+               rw_exit(&zsb->z_fuid_lock);
                ksiddomain_rele(searchnode.f_ksid);
                return (findnode->f_idx);
-       } else {
+       } else if (addok) {
                fuid_domain_t *domnode;
-               nvlist_t *nvp;
-               nvlist_t **fuids;
                uint64_t retidx;
-               size_t nvsize = 0;
-               char *packed;
-               dmu_buf_t *db;
-               int i = 0;
 
-               if (rw == RW_READER && !rw_tryupgrade(&zfsvfs->z_fuid_lock)) {
-                       rw_exit(&zfsvfs->z_fuid_lock);
+               if (rw == RW_READER && !rw_tryupgrade(&zsb->z_fuid_lock)) {
+                       rw_exit(&zsb->z_fuid_lock);
                        rw = RW_WRITER;
                        goto retry;
                }
@@ -284,50 +342,16 @@ retry:
                domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
                domnode->f_ksid = searchnode.f_ksid;
 
-               retidx = domnode->f_idx = avl_numnodes(&zfsvfs->z_fuid_idx) + 1;
+               retidx = domnode->f_idx = avl_numnodes(&zsb->z_fuid_idx) + 1;
 
-               avl_add(&zfsvfs->z_fuid_domain, domnode);
-               avl_add(&zfsvfs->z_fuid_idx, domnode);
-               /*
-                * Now resync the on-disk nvlist.
-                */
-               VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
-
-               domnode = avl_first(&zfsvfs->z_fuid_domain);
-               fuids = kmem_alloc(retidx * sizeof (void *), KM_SLEEP);
-               while (domnode) {
-                       VERIFY(nvlist_alloc(&fuids[i],
-                           NV_UNIQUE_NAME, KM_SLEEP) == 0);
-                       VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
-                           domnode->f_idx) == 0);
-                       VERIFY(nvlist_add_uint64(fuids[i],
-                           FUID_OFFSET, 0) == 0);
-                       VERIFY(nvlist_add_string(fuids[i++], FUID_DOMAIN,
-                           domnode->f_ksid->kd_name) == 0);
-                       domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode);
-               }
-               VERIFY(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
-                   fuids, retidx) == 0);
-               for (i = 0; i != retidx; i++)
-                       nvlist_free(fuids[i]);
-               kmem_free(fuids, retidx * sizeof (void *));
-               VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
-               packed = kmem_alloc(nvsize, KM_SLEEP);
-               VERIFY(nvlist_pack(nvp, &packed, &nvsize,
-                   NV_ENCODE_XDR, KM_SLEEP) == 0);
-               nvlist_free(nvp);
-               zfsvfs->z_fuid_size = nvsize;
-               dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
-                   zfsvfs->z_fuid_size, packed, tx);
-               kmem_free(packed, zfsvfs->z_fuid_size);
-               VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
-                   FTAG, &db));
-               dmu_buf_will_dirty(db, tx);
-               *(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
-               dmu_buf_rele(db, FTAG);
-
-               rw_exit(&zfsvfs->z_fuid_lock);
+               avl_add(&zsb->z_fuid_domain, domnode);
+               avl_add(&zsb->z_fuid_idx, domnode);
+               zsb->z_fuid_dirty = B_TRUE;
+               rw_exit(&zsb->z_fuid_lock);
                return (retidx);
+       } else {
+               rw_exit(&zsb->z_fuid_lock);
+               return (-1);
        }
 }
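
With the new addok flag a lookup can now be made without side effects; such
a caller must handle the -1 return for an unknown domain.  A hypothetical
lookup-only caller, where "domain" and "rid" are placeholder locals:

	int idx, error = 0;
	uint64_t fuid = 0;

	idx = zfs_fuid_find_by_domain(zsb, domain, NULL, B_FALSE);
	if (idx == -1) {
		/* unknown domain: nothing was added, the table stays clean */
		error = ENOENT;
	} else {
		fuid = FUID_ENCODE(idx, rid);
	}
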
 
@@ -337,24 +361,24 @@ retry:
  * Returns a pointer from an avl node of the domain string.
  *
  */
-static char *
-zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
+const char *
+zfs_fuid_find_by_idx(zfs_sb_t *zsb, uint32_t idx)
 {
        char *domain;
 
-       if (idx == 0 || !zfsvfs->z_use_fuids)
+       if (idx == 0 || !zsb->z_use_fuids)
                return (NULL);
 
-       if (!zfsvfs->z_fuid_loaded)
-               zfs_fuid_init(zfsvfs, NULL);
+       if (!zsb->z_fuid_loaded)
+               zfs_fuid_init(zsb);
 
-       rw_enter(&zfsvfs->z_fuid_lock, RW_READER);
+       rw_enter(&zsb->z_fuid_lock, RW_READER);
 
-       if (zfsvfs->z_fuid_obj)
-               domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
+       if (zsb->z_fuid_obj || zsb->z_fuid_dirty)
+               domain = zfs_fuid_idx_domain(&zsb->z_fuid_idx, idx);
        else
                domain = nulldomain;
-       rw_exit(&zfsvfs->z_fuid_lock);
+       rw_exit(&zsb->z_fuid_lock);
 
        ASSERT(domain);
        return (domain);
@@ -363,24 +387,23 @@ zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
 void
 zfs_fuid_map_ids(znode_t *zp, cred_t *cr, uid_t *uidp, uid_t *gidp)
 {
-       *uidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_phys->zp_uid,
-           cr, ZFS_OWNER);
-       *gidp = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_phys->zp_gid,
-           cr, ZFS_GROUP);
+       *uidp = zfs_fuid_map_id(ZTOZSB(zp), zp->z_uid, cr, ZFS_OWNER);
+       *gidp = zfs_fuid_map_id(ZTOZSB(zp), zp->z_gid, cr, ZFS_GROUP);
 }
 
 uid_t
-zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
+zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid,
     cred_t *cr, zfs_fuid_type_t type)
 {
+#ifdef HAVE_KSID
        uint32_t index = FUID_INDEX(fuid);
-       char *domain;
+       const char *domain;
        uid_t id;
 
        if (index == 0)
                return (fuid);
 
-       domain = zfs_fuid_find_by_idx(zfsvfs, index);
+       domain = zfs_fuid_find_by_idx(zsb, index);
        ASSERT(domain != NULL);
 
        if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
@@ -391,6 +414,12 @@ zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
                    FUID_RID(fuid), &id);
        }
        return (id);
+#else
+       /*
+        * The Linux port only supports POSIX IDs, use the passed id.
+        */
+       return (fuid);
+#endif /* HAVE_KSID */
 }
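
The index/rid split used throughout this file comes from the
FUID_ENCODE/FUID_INDEX/FUID_RID macros in zfs_fuid.h: the upper 32 bits
select an entry in the domain table and the lower 32 bits carry the rid,
with index 0 meaning a plain POSIX id.  A small illustration with
placeholder values:

	uint32_t idx = 2, rid = 1105;		/* placeholder values */
	uint64_t fuid = FUID_ENCODE(idx, rid);

	ASSERT3U(FUID_INDEX(fuid), ==, idx);	/* upper 32 bits: table index */
	ASSERT3U(FUID_RID(fuid), ==, rid);	/* lower 32 bits: rid in that domain */
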
 
 /*
@@ -400,7 +429,7 @@ zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
  * If ACL has multiple domains, then keep only one copy of each unique
  * domain.
  */
-static void
+void
 zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
     uint64_t idx, uint64_t id, zfs_fuid_type_t type)
 {
@@ -439,6 +468,7 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
        }
 
        if (type == ZFS_ACE_USER || type == ZFS_ACE_GROUP) {
+
                /*
                 * Now allocate fuid entry and add it on the end of the list
                 */
@@ -458,44 +488,60 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuidpp, const char *domain, uint32_t rid,
        }
 }
 
+#ifdef HAVE_KSID
 /*
  * Create a file system FUID, based on information in the users cred
+ *
+ * If cred contains KSID_OWNER then it should be used to determine
+ * the uid otherwise cred's uid will be used. By default cred's gid
+ * is used unless it's an ephemeral ID in which case KSID_GROUP will
+ * be used if it exists.
  */
 uint64_t
-zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
-    dmu_tx_t *tx, cred_t *cr, zfs_fuid_info_t **fuidp)
+zfs_fuid_create_cred(zfs_sb_t *zsb, zfs_fuid_type_t type,
+    cred_t *cr, zfs_fuid_info_t **fuidp)
 {
        uint64_t        idx;
        ksid_t          *ksid;
        uint32_t        rid;
-       char            *kdomain;
+       char            *kdomain;
        const char      *domain;
        uid_t           id;
 
        VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);
 
        ksid = crgetsid(cr, (type == ZFS_OWNER) ? KSID_OWNER : KSID_GROUP);
-       if (ksid) {
-               id = ksid_getid(ksid);
-       } else {
-               if (type == ZFS_OWNER)
-                       id = crgetuid(cr);
-               else
-                       id = crgetgid(cr);
+
+       if (!zsb->z_use_fuids || (ksid == NULL)) {
+               id = (type == ZFS_OWNER) ? crgetuid(cr) : crgetgid(cr);
+
+               if (IS_EPHEMERAL(id))
+                       return ((type == ZFS_OWNER) ? UID_NOBODY : GID_NOBODY);
+
+               return ((uint64_t)id);
        }
 
-       if (!zfsvfs->z_use_fuids || (!IS_EPHEMERAL(id)))
+       /*
+        * ksid is present and FUID is supported
+        */
+       id = (type == ZFS_OWNER) ? ksid_getid(ksid) : crgetgid(cr);
+
+       if (!IS_EPHEMERAL(id))
                return ((uint64_t)id);
 
+       if (type == ZFS_GROUP)
+               id = ksid_getid(ksid);
+
        rid = ksid_getrid(ksid);
        domain = ksid_getdomain(ksid);
 
-       idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, tx);
+       idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
 
        zfs_fuid_node_add(fuidp, kdomain, rid, idx, id, type);
 
        return (FUID_ENCODE(idx, rid));
 }
+#endif /* HAVE_KSID */
 
 /*
  * Create a file system FUID for an ACL ace
@@ -507,19 +553,19 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
  *
  * During replay operations the domain+rid information is
  * found in the zfs_fuid_info_t that the replay code has
- * attached to the zfsvfs of the file system.
+ * attached to the zsb of the file system.
  */
 uint64_t
-zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
-    zfs_fuid_type_t type, dmu_tx_t *tx, zfs_fuid_info_t **fuidpp)
+zfs_fuid_create(zfs_sb_t *zsb, uint64_t id, cred_t *cr,
+    zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
 {
+#ifdef HAVE_KSID
        const char *domain;
        char *kdomain;
        uint32_t fuid_idx = FUID_INDEX(id);
        uint32_t rid;
        idmap_stat status;
        uint64_t idx;
-       boolean_t is_replay = (zfsvfs->z_assign >= TXG_INITIAL);
        zfs_fuid_t *zfuid = NULL;
        zfs_fuid_info_t *fuidp;
 
@@ -531,11 +577,11 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
         * chmod.
         */
 
-       if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
+       if (!zsb->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
                return (id);
 
-       if (is_replay) {
-               fuidp = zfsvfs->z_fuid_replay;
+       if (zsb->z_replay) {
+               fuidp = zsb->z_fuid_replay;
 
                /*
                 * If we are passed an ephemeral id, but no
@@ -582,27 +628,34 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
                }
        }
 
-       idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, tx);
+       idx = zfs_fuid_find_by_domain(zsb, domain, &kdomain, B_TRUE);
 
-       if (!is_replay)
-               zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type);
+       if (!zsb->z_replay)
+               zfs_fuid_node_add(fuidpp, kdomain,
+                   rid, idx, id, type);
        else if (zfuid != NULL) {
                list_remove(&fuidp->z_fuids, zfuid);
                kmem_free(zfuid, sizeof (zfs_fuid_t));
        }
        return (FUID_ENCODE(idx, rid));
+#else
+       /*
+        * The Linux port only supports POSIX IDs, use the passed id.
+        */
+       return (id);
+#endif
 }
 
 void
-zfs_fuid_destroy(zfsvfs_t *zfsvfs)
+zfs_fuid_destroy(zfs_sb_t *zsb)
 {
-       rw_enter(&zfsvfs->z_fuid_lock, RW_WRITER);
-       if (!zfsvfs->z_fuid_loaded) {
-               rw_exit(&zfsvfs->z_fuid_lock);
+       rw_enter(&zsb->z_fuid_lock, RW_WRITER);
+       if (!zsb->z_fuid_loaded) {
+               rw_exit(&zsb->z_fuid_lock);
                return;
        }
-       zfs_fuid_table_destroy(&zfsvfs->z_fuid_idx, &zfsvfs->z_fuid_domain);
-       rw_exit(&zfsvfs->z_fuid_lock);
+       zfs_fuid_table_destroy(&zsb->z_fuid_idx, &zsb->z_fuid_domain);
+       rw_exit(&zsb->z_fuid_lock);
 }
 
 /*
@@ -656,19 +709,19 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
  * Will use a straight FUID compare when possible.
  */
 boolean_t
-zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
+zfs_groupmember(zfs_sb_t *zsb, uint64_t id, cred_t *cr)
 {
+#ifdef HAVE_KSID
        ksid_t          *ksid = crgetsid(cr, KSID_GROUP);
+       ksidlist_t      *ksidlist = crgetsidlist(cr);
        uid_t           gid;
 
-       if (ksid) {
-               int             i;
+       if (ksid && ksidlist) {
+               int             i;
                ksid_t          *ksid_groups;
-               ksidlist_t      *ksidlist = crgetsidlist(cr);
                uint32_t        idx = FUID_INDEX(id);
                uint32_t        rid = FUID_RID(id);
 
-               ASSERT(ksidlist);
                ksid_groups = ksidlist->ksl_sids;
 
                for (i = 0; i != ksidlist->ksl_nsid; i++) {
@@ -678,9 +731,9 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
                                        return (B_TRUE);
                                }
                        } else {
-                               char *domain;
+                               const char *domain;
 
-                               domain = zfs_fuid_find_by_idx(zfsvfs, idx);
+                               domain = zfs_fuid_find_by_idx(zsb, idx);
                                ASSERT(domain != NULL);
 
                                if (strcmp(domain,
@@ -698,7 +751,25 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
        /*
         * Not found in ksidlist, check posix groups
         */
-       gid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_GROUP);
+       gid = zfs_fuid_map_id(zsb, id, cr, ZFS_GROUP);
        return (groupmember(gid, cr));
+#else
+       return (B_TRUE);
+#endif
+}
+
+void
+zfs_fuid_txhold(zfs_sb_t *zsb, dmu_tx_t *tx)
+{
+       if (zsb->z_fuid_obj == 0) {
+               dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
+               dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
+                   FUID_SIZE_ESTIMATE(zsb));
+               dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
+       } else {
+               dmu_tx_hold_bonus(tx, zsb->z_fuid_obj);
+               dmu_tx_hold_write(tx, zsb->z_fuid_obj, 0,
+                   FUID_SIZE_ESTIMATE(zsb));
+       }
 }
 #endif