Update core ZFS code from build 121 to build 141.
[zfs.git] / module / zfs / zfs_vfsops.c
index 06b4dee..f68dde8 100644 (file)
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  */
 
+/* Portions Copyright 2010 Robert Milkowski */
+
 #include <sys/types.h>
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -46,6 +47,7 @@
 #include <sys/dsl_deleg.h>
 #include <sys/spa.h>
 #include <sys/zap.h>
+#include <sys/sa.h>
 #include <sys/varargs.h>
 #include <sys/policy.h>
 #include <sys/atomic.h>
@@ -60,6 +62,8 @@
 #include <sys/dnlc.h>
 #include <sys/dmu_objset.h>
 #include <sys/spa_boot.h>
+#include <sys/sa.h>
+#include "zfs_comutil.h"
 
 int zfsfstype;
 vfsops_t *zfs_vfsops = NULL;
@@ -67,6 +71,8 @@ static major_t zfs_major;
 static minor_t zfs_minor;
 static kmutex_t        zfs_dev_mtx;
 
+extern int sys_shutdown;
+
 static int zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr);
 static int zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr);
 static int zfs_mountroot(vfs_t *vfsp, enum whymountroot);
@@ -145,12 +151,23 @@ zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
                 * Sync a specific filesystem.
                 */
                zfsvfs_t *zfsvfs = vfsp->vfs_data;
+               dsl_pool_t *dp;
 
                ZFS_ENTER(zfsvfs);
+               dp = dmu_objset_pool(zfsvfs->z_os);
+
+               /*
+                * If the system is shutting down, then skip any
+                * filesystems which may exist on a suspended pool.
+                */
+               if (sys_shutdown && spa_suspended(dp->dp_spa)) {
+                       ZFS_EXIT(zfsvfs);
+                       return (0);
+               }
+
                if (zfsvfs->z_log != NULL)
                        zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
-               else
-                       txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
+
                ZFS_EXIT(zfsvfs);
        } else {
                /*
@@ -367,14 +384,6 @@ vscan_changed_cb(void *arg, uint64_t newval)
 }
 
 static void
-acl_mode_changed_cb(void *arg, uint64_t newval)
-{
-       zfsvfs_t *zfsvfs = arg;
-
-       zfsvfs->z_acl_mode = newval;
-}
-
-static void
 acl_inherit_changed_cb(void *arg, uint64_t newval)
 {
        zfsvfs_t *zfsvfs = arg;
@@ -504,8 +513,6 @@ zfs_register_callbacks(vfs_t *vfsp)
        error = error ? error : dsl_prop_register(ds,
            "snapdir", snapdir_changed_cb, zfsvfs);
        error = error ? error : dsl_prop_register(ds,
-           "aclmode", acl_mode_changed_cb, zfsvfs);
-       error = error ? error : dsl_prop_register(ds,
            "aclinherit", acl_inherit_changed_cb, zfsvfs);
        error = error ? error : dsl_prop_register(ds,
            "vscan", vscan_changed_cb, zfsvfs);
@@ -546,7 +553,6 @@ unregister:
        (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs);
        (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs);
        (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs);
-       (void) dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb, zfsvfs);
        (void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
            zfsvfs);
        (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zfsvfs);
@@ -555,6 +561,426 @@ unregister:
 }
 
 static int
+zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
+    uint64_t *userp, uint64_t *groupp)
+{
+       znode_phys_t *znp = data;
+       int error = 0;
+
+       /*
+        * Is it a valid type of object to track?
+        */
+       if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
+               return (ENOENT);
+
+       /*
+        * If we have a NULL data pointer
+        * then assume the id's aren't changing and
+        * return EEXIST to the dmu to let it know to
+        * use the same ids
+        */
+       if (data == NULL)
+               return (EEXIST);
+
+       if (bonustype == DMU_OT_ZNODE) {
+               *userp = znp->zp_uid;
+               *groupp = znp->zp_gid;
+       } else {
+               int hdrsize;
+
+               ASSERT(bonustype == DMU_OT_SA);
+               hdrsize = sa_hdrsize(data);
+
+               if (hdrsize != 0) {
+                       *userp = *((uint64_t *)((uintptr_t)data + hdrsize +
+                           SA_UID_OFFSET));
+                       *groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
+                           SA_GID_OFFSET));
+               } else {
+                       /*
+                        * This should only happen for newly created
+                        * files that haven't had the znode data filled
+                        * in yet.
+                        */
+                       *userp = 0;
+                       *groupp = 0;
+               }
+       }
+       return (error);
+}
+
+static void
+fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
+    char *domainbuf, int buflen, uid_t *ridp)
+{
+       uint64_t fuid;
+       const char *domain;
+
+       fuid = strtonum(fuidstr, NULL);
+
+       domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid));
+       if (domain)
+               (void) strlcpy(domainbuf, domain, buflen);
+       else
+               domainbuf[0] = '\0';
+       *ridp = FUID_RID(fuid);
+}
+
+static uint64_t
+zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
+{
+       switch (type) {
+       case ZFS_PROP_USERUSED:
+               return (DMU_USERUSED_OBJECT);
+       case ZFS_PROP_GROUPUSED:
+               return (DMU_GROUPUSED_OBJECT);
+       case ZFS_PROP_USERQUOTA:
+               return (zfsvfs->z_userquota_obj);
+       case ZFS_PROP_GROUPQUOTA:
+               return (zfsvfs->z_groupquota_obj);
+       }
+       return (0);
+}
+
/*
 * Iterate the space-used or quota ZAP object selected by "type",
 * resuming from the serialized cursor *cookiep, and fill vbuf with as
 * many zfs_useracct_t entries as fit in *bufsizep bytes.
 *
 * On return, *bufsizep holds the number of bytes actually written and
 * *cookiep the cursor position to resume from.  Returns ENOTSUP if
 * this objset does not maintain userspace accounting, 0 on success
 * (a missing quota object yields an empty result), or a ZAP error.
 */
int
zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;

	if (!dmu_objset_userspace_present(zfsvfs->z_os))
		return (ENOTSUP);

	obj = zfs_userquota_prop_to_obj(zfsvfs, type);
	if (obj == 0) {
		/* No backing object: report an empty result set. */
		*bufsizep = 0;
		return (0);
	}

	for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		/* Stop before an entry would overflow the caller's buffer. */
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		/* The ZAP key is the fuid in hex; decode to domain + rid. */
		fuidstr_to_sid(zfsvfs, za.za_name,
		    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	/* ENOENT from the cursor simply means end of the object. */
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
+
+/*
+ * buf must be big enough (eg, 32 bytes)
+ */
+static int
+id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
+    char *buf, boolean_t addok)
+{
+       uint64_t fuid;
+       int domainid = 0;
+
+       if (domain && domain[0]) {
+               domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
+               if (domainid == -1)
+                       return (ENOENT);
+       }
+       fuid = FUID_ENCODE(domainid, rid);
+       (void) sprintf(buf, "%llx", (longlong_t)fuid);
+       return (0);
+}
+
+int
+zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
+    const char *domain, uint64_t rid, uint64_t *valp)
+{
+       char buf[32];
+       int err;
+       uint64_t obj;
+
+       *valp = 0;
+
+       if (!dmu_objset_userspace_present(zfsvfs->z_os))
+               return (ENOTSUP);
+
+       obj = zfs_userquota_prop_to_obj(zfsvfs, type);
+       if (obj == 0)
+               return (0);
+
+       err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_FALSE);
+       if (err)
+               return (err);
+
+       err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp);
+       if (err == ENOENT)
+               err = 0;
+       return (err);
+}
+
/*
 * Set (or, when quota == 0, remove) the user/group quota for the id
 * identified by (domain, rid).  Creates the per-namespace quota ZAP
 * object on first use and links it into the master node.  Returns
 * EINVAL for non-quota property types, ENOTSUP on pre-userspace ZPL
 * versions, or a DMU/ZAP error.
 */
int
zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (EINVAL);

	if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
		return (ENOTSUP);

	/* Select the quota object slot for the requested namespace. */
	objp = (type == ZFS_PROP_USERQUOTA) ? &zfsvfs->z_userquota_obj :
	    &zfsvfs->z_groupquota_obj;

	/* addok == B_TRUE: may register a new domain in the FUID table. */
	err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zfsvfs->z_fuid_dirty;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		/* First quota in this namespace: also hold the master node. */
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* z_lock serializes lazy creation of the quota object. */
	mutex_enter(&zfsvfs->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zfsvfs->z_lock);

	if (quota == 0) {
		/* quota == 0 means "remove the quota entry". */
		err = zap_remove(zfsvfs->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	ASSERT(err == 0);
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);
	dmu_tx_commit(tx);
	return (err);
}
+
+boolean_t
+zfs_fuid_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
+{
+       char buf[32];
+       uint64_t used, quota, usedobj, quotaobj;
+       int err;
+
+       usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
+       quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
+
+       if (quotaobj == 0 || zfsvfs->z_replay)
+               return (B_FALSE);
+
+       (void) sprintf(buf, "%llx", (longlong_t)fuid);
+       err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
+       if (err != 0)
+               return (B_FALSE);
+
+       err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
+       if (err != 0)
+               return (B_FALSE);
+       return (used >= quota);
+}
+
+boolean_t
+zfs_owner_overquota(zfsvfs_t *zfsvfs, znode_t *zp, boolean_t isgroup)
+{
+       uint64_t fuid;
+       uint64_t quotaobj;
+       uid_t id;
+
+       quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
+
+       id = isgroup ? zp->z_gid : zp->z_uid;
+
+       if (quotaobj == 0 || zfsvfs->z_replay)
+               return (B_FALSE);
+
+       if (IS_EPHEMERAL(id)) {
+               VERIFY(0 == sa_lookup(zp->z_sa_hdl,
+                   isgroup ? SA_ZPL_GID(zfsvfs) : SA_ZPL_UID(zfsvfs),
+                   &fuid, sizeof (fuid)));
+       } else {
+               fuid = (uint64_t)id;
+       }
+
+       return (zfs_fuid_overquota(zfsvfs, isgroup, fuid));
+}
+
+int
+zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
+{
+       objset_t *os;
+       zfsvfs_t *zfsvfs;
+       uint64_t zval;
+       int i, error;
+       uint64_t sa_obj;
+
+       zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
+
+       /*
+        * We claim to always be readonly so we can open snapshots;
+        * other ZPL code will prevent us from writing to snapshots.
+        */
+       error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zfsvfs, &os);
+       if (error) {
+               kmem_free(zfsvfs, sizeof (zfsvfs_t));
+               return (error);
+       }
+
+       /*
+        * Initialize the zfs-specific filesystem structure.
+        * Should probably make this a kmem cache, shuffle fields,
+        * and just bzero up to z_hold_mtx[].
+        */
+       zfsvfs->z_vfs = NULL;
+       zfsvfs->z_parent = zfsvfs;
+       zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
+       zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
+       zfsvfs->z_os = os;
+
+       error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
+       if (error) {
+               goto out;
+       } else if (zfsvfs->z_version >
+           zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
+               (void) printf("Can't mount a version %lld file system "
+                   "on a version %lld pool\n. Pool must be upgraded to mount "
+                   "this file system.", (u_longlong_t)zfsvfs->z_version,
+                   (u_longlong_t)spa_version(dmu_objset_spa(os)));
+               error = ENOTSUP;
+               goto out;
+       }
+       if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
+               goto out;
+       zfsvfs->z_norm = (int)zval;
+
+       if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
+               goto out;
+       zfsvfs->z_utf8 = (zval != 0);
+
+       if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
+               goto out;
+       zfsvfs->z_case = (uint_t)zval;
+
+       /*
+        * Fold case on file systems that are always or sometimes case
+        * insensitive.
+        */
+       if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
+           zfsvfs->z_case == ZFS_CASE_MIXED)
+               zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
+
+       zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
+       zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
+
+       if (zfsvfs->z_use_sa) {
+               /* should either have both of these objects or none */
+               error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
+                   &sa_obj);
+               if (error)
+                       return (error);
+       } else {
+               /*
+                * Pre SA versions file systems should never touch
+                * either the attribute registration or layout objects.
+                */
+               sa_obj = 0;
+       }
+
+       zfsvfs->z_attr_table = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END);
+
+       if (zfsvfs->z_version >= ZPL_VERSION_SA)
+               sa_register_update_callback(os, zfs_sa_upgrade);
+
+       error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
+           &zfsvfs->z_root);
+       if (error)
+               goto out;
+       ASSERT(zfsvfs->z_root != 0);
+
+       error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
+           &zfsvfs->z_unlinkedobj);
+       if (error)
+               goto out;
+
+       error = zap_lookup(os, MASTER_NODE_OBJ,
+           zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
+           8, 1, &zfsvfs->z_userquota_obj);
+       if (error && error != ENOENT)
+               goto out;
+
+       error = zap_lookup(os, MASTER_NODE_OBJ,
+           zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
+           8, 1, &zfsvfs->z_groupquota_obj);
+       if (error && error != ENOENT)
+               goto out;
+
+       error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
+           &zfsvfs->z_fuid_obj);
+       if (error && error != ENOENT)
+               goto out;
+
+       error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
+           &zfsvfs->z_shares_dir);
+       if (error && error != ENOENT)
+               goto out;
+
+       mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
+       mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
+       list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
+           offsetof(znode_t, z_link_node));
+       rrw_init(&zfsvfs->z_teardown_lock);
+       rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
+       rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
+       for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
+               mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
+
+       *zfvp = zfsvfs;
+       return (0);
+
+out:
+       dmu_objset_disown(os, zfsvfs);
+       *zfvp = NULL;
+       kmem_free(zfsvfs, sizeof (zfsvfs_t));
+       return (error);
+}
+
+static int
 zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
 {
        int error;
@@ -566,9 +992,11 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
        /*
         * Set the objset user_ptr to track its zfsvfs.
         */
-       mutex_enter(&zfsvfs->z_os->os->os_user_ptr_lock);
+       mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
        dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
-       mutex_exit(&zfsvfs->z_os->os->os_user_ptr_lock);
+       mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
+
+       zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
 
        /*
         * If we are not mounting (ie: online recv), then we don't
@@ -583,68 +1011,109 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
                 * allow replays to succeed.
                 */
                readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
-               zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
+               if (readonly != 0)
+                       zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
+               else
+                       zfs_unlinked_drain(zfsvfs);
 
                /*
                 * Parse and replay the intent log.
+                *
+                * Because of ziltest, this must be done after
+                * zfs_unlinked_drain().  (Further note: ziltest
+                * doesn't use readonly mounts, where
+                * zfs_unlinked_drain() isn't called.)  This is because
+                * ziltest causes spa_sync() to think it's committed,
+                * but actually it is not, so the intent log contains
+                * many txg's worth of changes.
+                *
+                * In particular, if object N is in the unlinked set in
+                * the last txg to actually sync, then it could be
+                * actually freed in a later txg and then reallocated
+                * in a yet later txg.  This would write a "create
+                * object N" record to the intent log.  Normally, this
+                * would be fine because the spa_sync() would have
+                * written out the fact that object N is free, before
+                * we could write the "create object N" intent log
+                * record.
+                *
+                * But when we are in ziltest mode, we advance the "open
+                * txg" without actually spa_sync()-ing the changes to
+                * disk.  So we would see that object N is still
+                * allocated and in the unlinked set, and there is an
+                * intent log record saying to allocate it.
                 */
-               zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
-                   zfs_replay_vector, zfs_unlinked_drain);
-
-               zfs_unlinked_drain(zfsvfs);
+               if (zil_replay_disable) {
+                       zil_destroy(zfsvfs->z_log, B_FALSE);
+               } else {
+                       zfsvfs->z_replay = B_TRUE;
+                       zil_replay(zfsvfs->z_os, zfsvfs, zfs_replay_vector);
+                       zfsvfs->z_replay = B_FALSE;
+               }
                zfsvfs->z_vfs->vfs_flag |= readonly; /* restore readonly bit */
        }
 
-       if (!zil_disable)
-               zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
-
        return (0);
 }
 
-static void
-zfs_freezfsvfs(zfsvfs_t *zfsvfs)
+void
+zfsvfs_free(zfsvfs_t *zfsvfs)
 {
+       int i;
+       extern krwlock_t zfsvfs_lock; /* in zfs_znode.c */
+
+       /*
+        * This is a barrier to prevent the filesystem from going away in
+        * zfs_znode_move() until we can safely ensure that the filesystem is
+        * not unmounted. We consider the filesystem valid before the barrier
+        * and invalid after the barrier.
+        */
+       rw_enter(&zfsvfs_lock, RW_READER);
+       rw_exit(&zfsvfs_lock);
+
+       zfs_fuid_destroy(zfsvfs);
+
        mutex_destroy(&zfsvfs->z_znodes_lock);
-       mutex_destroy(&zfsvfs->z_online_recv_lock);
+       mutex_destroy(&zfsvfs->z_lock);
        list_destroy(&zfsvfs->z_all_znodes);
        rrw_destroy(&zfsvfs->z_teardown_lock);
        rw_destroy(&zfsvfs->z_teardown_inactive_lock);
        rw_destroy(&zfsvfs->z_fuid_lock);
+       for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
+               mutex_destroy(&zfsvfs->z_hold_mtx[i]);
        kmem_free(zfsvfs, sizeof (zfsvfs_t));
 }
 
+static void
+zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
+{
+       zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
+       if (zfsvfs->z_use_fuids && zfsvfs->z_vfs) {
+               vfs_set_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
+               vfs_set_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
+               vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
+               vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
+               vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
+               vfs_set_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
+       }
+       zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
+}
+
 static int
 zfs_domount(vfs_t *vfsp, char *osname)
 {
        dev_t mount_dev;
-       uint64_t recordsize, readonly;
+       uint64_t recordsize, fsid_guid;
        int error = 0;
-       int mode;
        zfsvfs_t *zfsvfs;
-       znode_t *zp = NULL;
 
        ASSERT(vfsp);
        ASSERT(osname);
 
-       /*
-        * Initialize the zfs-specific filesystem structure.
-        * Should probably make this a kmem cache, shuffle fields,
-        * and just bzero up to z_hold_mtx[].
-        */
-       zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
+       error = zfsvfs_create(osname, &zfsvfs);
+       if (error)
+               return (error);
        zfsvfs->z_vfs = vfsp;
-       zfsvfs->z_parent = zfsvfs;
-       zfsvfs->z_assign = TXG_NOWAIT;
-       zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
-       zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
-
-       mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
-       mutex_init(&zfsvfs->z_online_recv_lock, NULL, MUTEX_DEFAULT, NULL);
-       list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
-           offsetof(znode_t, z_link_node));
-       rrw_init(&zfsvfs->z_teardown_lock);
-       rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
-       rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
 
        /* Initialize the generic filesystem structure. */
        vfsp->vfs_bcount = 0;
@@ -666,39 +1135,24 @@ zfs_domount(vfs_t *vfsp, char *osname)
        vfsp->vfs_flag |= VFS_NOTRUNC;
        vfsp->vfs_data = zfsvfs;
 
-       if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
-               goto out;
-
-       mode = DS_MODE_OWNER;
-       if (readonly)
-               mode |= DS_MODE_READONLY;
-
-       error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
-       if (error == EROFS) {
-               mode = DS_MODE_OWNER | DS_MODE_READONLY;
-               error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
-                   &zfsvfs->z_os);
-       }
-
-       if (error)
-               goto out;
-
-       if (error = zfs_init_fs(zfsvfs, &zp))
-               goto out;
-
-       /* The call to zfs_init_fs leaves the vnode held, release it here. */
-       VN_RELE(ZTOV(zp));
+       /*
+        * The fsid is 64 bits, composed of an 8-bit fs type, which
+        * separates our fsid from any other filesystem types, and a
+        * 56-bit objset unique ID.  The objset unique ID is unique to
+        * all objsets open on this system, provided by unique_create().
+        * The 8-bit fs type must be put in the low bits of fsid[1]
+        * because that's where other Solaris filesystems put it.
+        */
+       fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
+       ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
+       vfsp->vfs_fsid.val[0] = fsid_guid;
+       vfsp->vfs_fsid.val[1] = ((fsid_guid>>32) << 8) |
+           zfsfstype & 0xFF;
 
        /*
         * Set features for file system.
         */
-       zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
-       if (zfsvfs->z_use_fuids) {
-               vfs_set_feature(vfsp, VFSFT_XVATTR);
-               vfs_set_feature(vfsp, VFSFT_SYSATTR_VIEWS);
-               vfs_set_feature(vfsp, VFSFT_ACEMASKONACCESS);
-               vfs_set_feature(vfsp, VFSFT_ACLONCREATE);
-       }
+       zfs_set_fuid_feature(zfsvfs);
        if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
                vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
                vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
@@ -707,17 +1161,21 @@ zfs_domount(vfs_t *vfsp, char *osname)
                vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
                vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
        }
+       vfs_set_feature(vfsp, VFSFT_ZEROCOPY_SUPPORTED);
 
        if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
                uint64_t pval;
 
-               ASSERT(mode & DS_MODE_READONLY);
                atime_changed_cb(zfsvfs, B_FALSE);
                readonly_changed_cb(zfsvfs, B_TRUE);
                if (error = dsl_prop_get_integer(osname, "xattr", &pval, NULL))
                        goto out;
                xattr_changed_cb(zfsvfs, pval);
                zfsvfs->z_issnap = B_TRUE;
+
+               mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
+               dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
+               mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
        } else {
                error = zfsvfs_setup(zfsvfs, B_TRUE);
        }
@@ -726,9 +1184,8 @@ zfs_domount(vfs_t *vfsp, char *osname)
                zfsctl_create(zfsvfs);
 out:
        if (error) {
-               if (zfsvfs->z_os)
-                       dmu_objset_close(zfsvfs->z_os);
-               zfs_freezfsvfs(zfsvfs);
+               dmu_objset_disown(zfsvfs->z_os, zfsvfs);
+               zfsvfs_free(zfsvfs);
        } else {
                atomic_add_32(&zfs_active_fs_count, 1);
        }
@@ -771,9 +1228,6 @@ zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
                VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
                    zfsvfs) == 0);
 
-               VERIFY(dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb,
-                   zfsvfs) == 0);
-
                VERIFY(dsl_prop_unregister(ds, "aclinherit",
                    acl_inherit_changed_cb, zfsvfs) == 0);
 
@@ -837,6 +1291,139 @@ zfs_parse_bootfs(char *bpath, char *outpath)
        return (error);
 }
 
+/*
+ * zfs_check_global_label:
+ *     Check that the hex label string is appropriate for the dataset
+ *     being mounted into the global_zone proper.
+ *
+ *     Return an error if the hex label string is not default or
+ *     admin_low/admin_high.  For admin_low labels, the corresponding
+ *     dataset must be readonly.
+ */
+int
+zfs_check_global_label(const char *dsname, const char *hexsl)
+{
+       if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
+               return (0);
+       if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
+               return (0);
+       if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
+               /* must be readonly */
+               uint64_t rdonly;
+
+               if (dsl_prop_get_integer(dsname,
+                   zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
+                       return (EACCES);
+               return (rdonly ? 0 : EACCES);
+       }
+       return (EACCES);
+}
+
/*
 * zfs_mount_label_policy:
 *	Determine whether the mount is allowed according to MAC check,
 *	by comparing (where appropriate) the label of the dataset
 *	against the label of the zone being mounted into.  If the
 *	dataset has no label, create one.
 *
 *	Returns:
 *		 0 :	access allowed
 *		>0 :	error code, such as EACCES
 */
static int
zfs_mount_label_policy(vfs_t *vfsp, char *osname)
{
	int		error, retv;
	zone_t		*mntzone = NULL;
	ts_label_t	*mnt_tsl;
	bslabel_t	*mnt_sl;
	bslabel_t	ds_sl;
	char		ds_hexsl[MAXNAMELEN];

	retv = EACCES;				/* assume the worst */

	/*
	 * Start by getting the dataset label if it exists.
	 */
	error = dsl_prop_get(osname, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
	    1, sizeof (ds_hexsl), &ds_hexsl, NULL);
	if (error)
		return (EACCES);

	/*
	 * If labeling is NOT enabled, then disallow the mount of datasets
	 * which have a non-default label already.  No other label checks
	 * are needed.
	 */
	if (!is_system_labeled()) {
		if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
			return (0);
		return (EACCES);
	}

	/*
	 * Get the label of the mountpoint.  If mounting into the global
	 * zone (i.e. mountpoint is not within an active zone and the
	 * zoned property is off), the label must be default or
	 * admin_low/admin_high only; no other checks are needed.
	 */
	mntzone = zone_find_by_any_path(refstr_value(vfsp->vfs_mntpt), B_FALSE);
	if (mntzone->zone_id == GLOBAL_ZONEID) {
		uint64_t zoned;

		/* Done with the zone reference before the early returns. */
		zone_rele(mntzone);

		if (dsl_prop_get_integer(osname,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL))
			return (EACCES);
		if (!zoned)
			return (zfs_check_global_label(osname, ds_hexsl));
		else
			/*
			 * This is the case of a zone dataset being mounted
			 * initially, before the zone has been fully created;
			 * allow this mount into global zone.
			 */
			return (0);
	}

	/* Mounting into a non-global zone: compare against its label. */
	mnt_tsl = mntzone->zone_slabel;
	ASSERT(mnt_tsl != NULL);
	label_hold(mnt_tsl);
	mnt_sl = label2bslabel(mnt_tsl);

	if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) == 0) {
		/*
		 * The dataset doesn't have a real label, so fabricate one.
		 */
		char *str = NULL;

		if (l_to_str_internal(mnt_sl, &str) == 0 &&
		    dsl_prop_set(osname, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
		    ZPROP_SRC_LOCAL, 1, strlen(str) + 1, str) == 0)
			retv = 0;
		if (str != NULL)
			kmem_free(str, strlen(str) + 1);
	} else if (hexstr_to_label(ds_hexsl, &ds_sl) == 0) {
		/*
		 * Now compare labels to complete the MAC check.  If the
		 * labels are equal then allow access.  If the mountpoint
		 * label dominates the dataset label, allow readonly access.
		 * Otherwise, access is denied.
		 */
		if (blequal(mnt_sl, &ds_sl))
			retv = 0;
		else if (bldominates(mnt_sl, &ds_sl)) {
			vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
			retv = 0;
		}
	}

	label_rele(mnt_tsl);
	zone_rele(mntzone);
	return (retv);
}
+
 static int
 zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
 {
@@ -989,8 +1576,7 @@ zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
         */
        error = secpolicy_fs_mount(cr, mvp, vfsp);
        if (error) {
-               error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr);
-               if (error == 0) {
+               if (dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) == 0) {
                        vattr_t         vattr;
 
                        /*
@@ -1000,16 +1586,14 @@ zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
 
                        vattr.va_mask = AT_UID;
 
-                       if (error = VOP_GETATTR(mvp, &vattr, 0, cr, NULL)) {
+                       if (VOP_GETATTR(mvp, &vattr, 0, cr, NULL)) {
                                goto out;
                        }
 
                        if (secpolicy_vnode_owner(cr, vattr.va_uid) != 0 &&
                            VOP_ACCESS(mvp, VWRITE, 0, cr, NULL) != 0) {
-                               error = EPERM;
                                goto out;
                        }
-
                        secpolicy_fs_mount_clearopts(cr, vfsp);
                } else {
                        goto out;
@@ -1026,6 +1610,10 @@ zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
                goto out;
        }
 
+       error = zfs_mount_label_policy(vfsp, osname);
+       if (error)
+               goto out;
+
        /*
         * When doing a remount, we simply refresh our temporary properties
         * according to those options set in the current VFS options.
@@ -1039,6 +1627,13 @@ zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
 
        error = zfs_domount(vfsp, osname);
 
+       /*
+        * Add an extra VFS_HOLD on our parent vfs so that it can't
+        * disappear due to a forced unmount.
+        */
+       if (error == 0 && ((zfsvfs_t *)vfsp->vfs_data)->z_issnap)
+               VFS_HOLD(mvp->v_vfsp);
+
 out:
        pn_free(&spn);
        return (error);
@@ -1180,7 +1775,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
        mutex_enter(&zfsvfs->z_znodes_lock);
        for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
            zp = list_next(&zfsvfs->z_all_znodes, zp))
-               if (zp->z_dbuf) {
+               if (zp->z_sa_hdl) {
                        ASSERT(ZTOV(zp)->v_count > 0);
                        zfs_znode_dmu_fini(zp);
                }
@@ -1231,9 +1826,8 @@ zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
 
        ret = secpolicy_fs_unmount(cr, vfsp);
        if (ret) {
-               ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
-                   ZFS_DELEG_PERM_MOUNT, cr);
-               if (ret)
+               if (dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
+                   ZFS_DELEG_PERM_MOUNT, cr))
                        return (ret);
        }
 
@@ -1288,14 +1882,14 @@ zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
                /*
                 * Unset the objset user_ptr.
                 */
-               mutex_enter(&os->os->os_user_ptr_lock);
+               mutex_enter(&os->os_user_ptr_lock);
                dmu_objset_set_user(os, NULL);
-               mutex_exit(&os->os->os_user_ptr_lock);
+               mutex_exit(&os->os_user_ptr_lock);
 
                /*
                 * Finally release the objset
                 */
-               dmu_objset_close(os);
+               dmu_objset_disown(os, zfsvfs);
        }
 
        /*
@@ -1376,7 +1970,9 @@ zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
                ZFS_EXIT(zfsvfs);
                return (err);
        }
-       zp_gen = zp->z_phys->zp_gen & gen_mask;
+       (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
+           sizeof (uint64_t));
+       zp_gen = zp_gen & gen_mask;
        if (zp_gen == 0)
                zp_gen = 1;
        if (zp->z_unlinked || zp_gen != fid_gen) {
@@ -1398,16 +1994,13 @@ zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
  * 'z_teardown_inactive_lock' write held.
  */
 int
-zfs_suspend_fs(zfsvfs_t *zfsvfs, char *name, int *mode)
+zfs_suspend_fs(zfsvfs_t *zfsvfs)
 {
        int error;
 
        if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
                return (error);
-
-       *mode = zfsvfs->z_os->os_mode;
-       dmu_objset_name(zfsvfs->z_os, name);
-       dmu_objset_close(zfsvfs->z_os);
+       dmu_objset_disown(zfsvfs->z_os, zfsvfs);
 
        return (0);
 }
@@ -1416,18 +2009,30 @@ zfs_suspend_fs(zfsvfs_t *zfsvfs, char *name, int *mode)
  * Reopen zfsvfs_t::z_os and release VOPs.
  */
 int
-zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode)
+zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname)
 {
-       int err;
+       int err, err2;
 
        ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock));
        ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
 
-       err = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
+       err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zfsvfs,
+           &zfsvfs->z_os);
        if (err) {
                zfsvfs->z_os = NULL;
        } else {
                znode_t *zp;
+               uint64_t sa_obj = 0;
+
+               err2 = zap_lookup(zfsvfs->z_os, MASTER_NODE_OBJ,
+                   ZFS_SA_ATTRS, 8, 1, &sa_obj);
+
+               if ((err || err2) && zfsvfs->z_version >= ZPL_VERSION_SA)
+                       goto bail;
+
+
+               zfsvfs->z_attr_table = sa_setup(zfsvfs->z_os, sa_obj,
+                   zfs_attr_table,  ZPL_END);
 
                VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
 
@@ -1446,6 +2051,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode)
 
        }
 
+bail:
        /* release the VOPs */
        rw_exit(&zfsvfs->z_teardown_inactive_lock);
        rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
@@ -1465,13 +2071,17 @@ static void
 zfs_freevfs(vfs_t *vfsp)
 {
        zfsvfs_t *zfsvfs = vfsp->vfs_data;
-       int i;
 
-       for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
-               mutex_destroy(&zfsvfs->z_hold_mtx[i]);
+       /*
+        * If this is a snapshot, we have an extra VFS_HOLD on our parent
+        * from zfs_mount().  Release it here.  If we came through
+        * zfs_mountroot() instead, we didn't grab an extra hold, so
+        * skip the VFS_RELE for rootvfs.
+        */
+       if (zfsvfs->z_issnap && (vfsp != rootvfs))
+               VFS_RELE(zfsvfs->z_parent->z_vfs);
 
-       zfs_fuid_destroy(zfsvfs);
-       zfs_freezfsvfs(zfsvfs);
+       zfsvfs_free(zfsvfs);
 
        atomic_add_32(&zfs_active_fs_count, -1);
 }
@@ -1530,6 +2140,8 @@ zfs_init(void)
         * Initialize znode cache, vnode ops, etc...
         */
        zfs_znode_init();
+
+       dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
 }
 
 void
@@ -1546,54 +2158,71 @@ zfs_busy(void)
 }
 
 int
-zfs_set_version(const char *name, uint64_t newvers)
+zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
 {
        int error;
-       objset_t *os;
+       objset_t *os = zfsvfs->z_os;
        dmu_tx_t *tx;
-       uint64_t curvers;
-
-       /*
-        * XXX for now, require that the filesystem be unmounted.  Would
-        * be nice to find the zfsvfs_t and just update that if
-        * possible.
-        */
 
        if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
                return (EINVAL);
 
-       error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_OWNER, &os);
-       if (error)
-               return (error);
+       if (newvers < zfsvfs->z_version)
+               return (EINVAL);
 
-       error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
-           8, 1, &curvers);
-       if (error)
-               goto out;
-       if (newvers < curvers) {
-               error = EINVAL;
-               goto out;
-       }
+       if (zfs_spa_version_map(newvers) >
+           spa_version(dmu_objset_spa(zfsvfs->z_os)))
+               return (ENOTSUP);
 
        tx = dmu_tx_create(os);
-       dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR);
+       dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
+       if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
+               dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
+                   ZFS_SA_ATTRS);
+               dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
+       }
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
-               goto out;
+               return (error);
+       }
+
+       error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
+           8, 1, &newvers, tx);
+
+       if (error) {
+               dmu_tx_commit(tx);
+               return (error);
        }
-       error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1,
-           &newvers, tx);
 
-       spa_history_internal_log(LOG_DS_UPGRADE,
-           dmu_objset_spa(os), tx, CRED(),
-           "oldver=%llu newver=%llu dataset = %llu", curvers, newvers,
-           dmu_objset_id(os));
+       if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
+               uint64_t sa_obj;
+
+               ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
+                   SPA_VERSION_SA);
+               sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
+                   DMU_OT_NONE, 0, tx);
+
+               error = zap_add(os, MASTER_NODE_OBJ,
+                   ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
+               ASSERT3U(error, ==, 0);
+
+               VERIFY(0 == sa_set_sa_object(os, sa_obj));
+               sa_register_update_callback(os, zfs_sa_upgrade);
+       }
+
+       spa_history_log_internal(LOG_DS_UPGRADE,
+           dmu_objset_spa(os), tx, "oldver=%llu newver=%llu dataset = %llu",
+           zfsvfs->z_version, newvers, dmu_objset_id(os));
+
        dmu_tx_commit(tx);
 
-out:
-       dmu_objset_close(os);
-       return (error);
+       zfsvfs->z_version = newvers;
+
+       if (zfsvfs->z_version >= ZPL_VERSION_FUID)
+               zfs_set_fuid_feature(zfsvfs);
+
+       return (0);
 }
 
 /*