Introduce zpool_get_prop_literal interface
diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c
index 75ecc54..468243c 100644
--- a/lib/libzfs/libzfs_pool.c
+++ b/lib/libzfs/libzfs_pool.c
  */
 
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
-#include <alloca.h>
-#include <assert.h>
 #include <ctype.h>
 #include <errno.h>
 #include <devid.h>
-#include <dirent.h>
 #include <fcntl.h>
 #include <libintl.h>
 #include <stdio.h>
 #include <strings.h>
 #include <unistd.h>
 #include <zone.h>
+#include <sys/stat.h>
 #include <sys/efi_partition.h>
 #include <sys/vtoc.h>
 #include <sys/zfs_ioctl.h>
-#include <sys/zio.h>
-#include <strings.h>
+#include <dlfcn.h>
 
 #include "zfs_namecheck.h"
 #include "zfs_prop.h"
 #include "libzfs_impl.h"
+#include "zfs_comutil.h"
+#include "zfeature_common.h"
 
 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
 
-#if defined(__i386) || defined(__amd64)
-#define        BOOTCMD "installgrub(1M)"
-#else
-#define        BOOTCMD "installboot(1M)"
-#endif
+typedef struct prop_flags {
+       int create:1;   /* Validate property on creation */
+       int import:1;   /* Validate property on import */
+} prop_flags_t;
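
For orientation, a minimal sketch of how callers construct these flags (the
zpool_create(), zpool_import_props(), and zpool_set_prop() hunks further down
use exactly these initializers):

	prop_flags_t create_flags = { .create = B_TRUE, .import = B_FALSE };
	prop_flags_t import_flags = { .create = B_FALSE, .import = B_TRUE };
	prop_flags_t set_flags = { 0 };		/* plain 'zpool set' */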
 
 /*
  * ====================================================================
@@ -64,7 +63,7 @@ static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
 static int
 zpool_get_all_props(zpool_handle_t *zhp)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
@@ -180,6 +179,8 @@ char *
 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
 {
        switch (state) {
+       default:
+               break;
        case VDEV_STATE_CLOSED:
        case VDEV_STATE_OFFLINE:
                return (gettext("OFFLINE"));
@@ -188,6 +189,8 @@ zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
        case VDEV_STATE_CANT_OPEN:
                if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
                        return (gettext("FAULTED"));
+               else if (aux == VDEV_AUX_SPLIT_POOL)
+                       return (gettext("SPLIT"));
                else
                        return (gettext("UNAVAIL"));
        case VDEV_STATE_FAULTED:
@@ -202,13 +205,53 @@ zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
 }
 
 /*
- * Get a zpool property value for 'prop' and return the value in
- * a pre-allocated buffer.
+ * Map POOL STATE to printed strings.
+ */
+const char *
+zpool_pool_state_to_name(pool_state_t state)
+{
+       switch (state) {
+       default:
+               break;
+       case POOL_STATE_ACTIVE:
+               return (gettext("ACTIVE"));
+       case POOL_STATE_EXPORTED:
+               return (gettext("EXPORTED"));
+       case POOL_STATE_DESTROYED:
+               return (gettext("DESTROYED"));
+       case POOL_STATE_SPARE:
+               return (gettext("SPARE"));
+       case POOL_STATE_L2CACHE:
+               return (gettext("L2CACHE"));
+       case POOL_STATE_UNINITIALIZED:
+               return (gettext("UNINITIALIZED"));
+       case POOL_STATE_UNAVAIL:
+               return (gettext("UNAVAIL"));
+       case POOL_STATE_POTENTIALLY_ACTIVE:
+               return (gettext("POTENTIALLY_ACTIVE"));
+       }
+
+       return (gettext("UNKNOWN"));
+}
+
+/*
+ * API compatibility wrapper around zpool_get_prop_literal
  */
 int
 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
     zprop_source_t *srctype)
 {
+       return (zpool_get_prop_literal(zhp, prop, buf, len, srctype,
+           B_FALSE));
+}
+
+/*
+ * Get a zpool property value for 'prop' and return the value in
+ * a pre-allocated buffer.
+ */
+int
+zpool_get_prop_literal(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
+    size_t len, zprop_source_t *srctype, boolean_t literal)
+    zprop_source_t *srctype, boolean_t literal)
+{
        uint64_t intval;
        const char *strval;
        zprop_source_t src = ZPROP_SRC_NONE;
@@ -228,11 +271,12 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
 
                case ZPOOL_PROP_GUID:
                        intval = zpool_get_prop_int(zhp, prop, &src);
-                       (void) snprintf(buf, len, "%llu", intval);
+                       (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
                        break;
 
                case ZPOOL_PROP_ALTROOT:
                case ZPOOL_PROP_CACHEFILE:
+               case ZPOOL_PROP_COMMENT:
                        if (zhp->zpool_props != NULL ||
                            zpool_get_all_props(zhp) == 0) {
                                (void) strlcpy(buf,
@@ -268,9 +312,16 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
 
                switch (prop) {
                case ZPOOL_PROP_SIZE:
-               case ZPOOL_PROP_USED:
-               case ZPOOL_PROP_AVAILABLE:
-                       (void) zfs_nicenum(intval, buf, len);
+               case ZPOOL_PROP_ALLOCATED:
+               case ZPOOL_PROP_FREE:
+               case ZPOOL_PROP_FREEING:
+               case ZPOOL_PROP_EXPANDSZ:
+               case ZPOOL_PROP_ASHIFT:
+                       if (literal)
+                               (void) snprintf(buf, len, "%llu",
+                                   (u_longlong_t)intval);
+                       else
+                               (void) zfs_nicenum(intval, buf, len);
                        break;
 
                case ZPOOL_PROP_CAPACITY:
@@ -278,17 +329,30 @@ zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
                            (u_longlong_t)intval);
                        break;
 
+               case ZPOOL_PROP_DEDUPRATIO:
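+                       /* ratio is fixed-point, scaled by 100: 312 -> "3.12x" */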
+                       (void) snprintf(buf, len, "%llu.%02llux",
+                           (u_longlong_t)(intval / 100),
+                           (u_longlong_t)(intval % 100));
+                       break;
+
                case ZPOOL_PROP_HEALTH:
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                        verify(nvlist_lookup_uint64_array(nvroot,
-                           ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
+                           ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
+                           == 0);
 
                        (void) strlcpy(buf, zpool_state_to_name(intval,
                            vs->vs_aux), len);
                        break;
+               case ZPOOL_PROP_VERSION:
+                       if (intval >= SPA_VERSION_FEATURES) {
+                               (void) snprintf(buf, len, "-");
+                               break;
+                       }
+                       /* FALLTHROUGH */
                default:
-                       (void) snprintf(buf, len, "%llu", intval);
+                       (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
                }
                break;
 
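
A hedged usage sketch of the new interface (pool-handle setup and error
handling omitted; the byte count shown is illustrative): with literal set to
B_TRUE the caller gets the raw number where zpool_get_prop() would return a
zfs_nicenum()-formatted string.

	char buf[ZFS_MAXPROPLEN];

	/* human-readable, e.g. "1.2T" */
	(void) zpool_get_prop(zhp, ZPOOL_PROP_SIZE, buf, sizeof (buf), NULL);

	/* exact value, e.g. "1319413953536" */
	(void) zpool_get_prop_literal(zhp, ZPOOL_PROP_SIZE, buf,
	    sizeof (buf), NULL, B_TRUE);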
@@ -329,6 +393,7 @@ bootfs_name_valid(const char *pool, char *bootfs)
        return (B_FALSE);
 }
 
+#if defined(__sun__) || defined(__sun)
 /*
  * Inspect the configuration to determine if any of the devices contain
  * an EFI label.
@@ -349,9 +414,10 @@ pool_uses_efi(nvlist_t *config)
        }
        return (B_FALSE);
 }
+#endif
 
-static boolean_t
-pool_is_bootable(zpool_handle_t *zhp)
+boolean_t
+zpool_is_bootable(zpool_handle_t *zhp)
 {
        char bootfs[ZPOOL_MAXNAMELEN];
 
@@ -368,14 +434,14 @@ pool_is_bootable(zpool_handle_t *zhp)
  */
 static nvlist_t *
 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
-    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
+    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
 {
        nvpair_t *elem;
        nvlist_t *retprops;
        zpool_prop_t prop;
        char *strval;
        uint64_t intval;
-       char *slash;
+       char *slash, *check;
        struct stat64 statbuf;
        zpool_handle_t *zhp;
        nvlist_t *nvroot;
@@ -389,10 +455,48 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                const char *propname = nvpair_name(elem);
 
+               prop = zpool_name_to_prop(propname);
+               if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
+                       int err;
+                       zfeature_info_t *feature;
+                       char *fname = strchr(propname, '@') + 1;
+
+                       err = zfeature_lookup_name(fname, &feature);
+                       if (err != 0) {
+                               ASSERT3U(err, ==, ENOENT);
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "invalid feature '%s'"), fname);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+
+                       if (nvpair_type(elem) != DATA_TYPE_STRING) {
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "'%s' must be a string"), propname);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+
+                       (void) nvpair_value_string(elem, &strval);
+                       if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "property '%s' can only be set to "
+                                   "'enabled'"), propname);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+
+                       if (nvlist_add_uint64(retprops, propname, 0) != 0) {
+                               (void) no_memory(hdl);
+                               goto error;
+                       }
+                       continue;
+               }
+
                /*
                 * Make sure this property is valid and applies to this type.
                 */
-               if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
+               if (prop == ZPROP_INVAL) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "invalid property '%s'"), propname);
                        (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
@@ -414,8 +518,11 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
                 * Perform additional checking for specific properties.
                 */
                switch (prop) {
+               default:
+                       break;
                case ZPOOL_PROP_VERSION:
-                       if (intval < version || intval > SPA_VERSION) {
+                       if (intval < version ||
+                           !SPA_VERSION_IS_SUPPORTED(intval)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' number %d is invalid."),
                                    propname, intval);
@@ -424,8 +531,26 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
                        }
                        break;
 
+               case ZPOOL_PROP_ASHIFT:
+                       if (!flags.create) {
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "property '%s' can only be set at "
+                                   "creation time"), propname);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+
+                       if (intval != 0 && (intval < 9 || intval > 13)) {
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "property '%s' number %llu is invalid."),
+                                   propname, (u_longlong_t)intval);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+                       break;
+
                case ZPOOL_PROP_BOOTFS:
-                       if (create_or_import) {
+                       if (flags.create || flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' cannot be set at creation "
                                    "or import time"), propname);
@@ -462,6 +587,7 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
 
+#if defined(__sun__) || defined(__sun)
                        /*
                         * bootfs property cannot be set on a disk which has
                         * been EFI labeled.
@@ -474,11 +600,12 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
                                zpool_close(zhp);
                                goto error;
                        }
+#endif
                        zpool_close(zhp);
                        break;
 
                case ZPOOL_PROP_ALTROOT:
-                       if (!create_or_import) {
+                       if (!flags.create && !flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set during pool "
                                    "creation or import"), propname);
@@ -533,6 +660,36 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
 
                        *slash = '/';
                        break;
+
+               case ZPOOL_PROP_COMMENT:
+                       for (check = strval; *check != '\0'; check++) {
+                               if (!isprint(*check)) {
+                                       zfs_error_aux(hdl,
+                                           dgettext(TEXT_DOMAIN,
+                                           "comment may only have printable "
+                                           "characters"));
+                                       (void) zfs_error(hdl, EZFS_BADPROP,
+                                           errbuf);
+                                       goto error;
+                               }
+                       }
+                       if (strlen(strval) > ZPROP_MAX_COMMENT) {
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "comment must not exceed %d characters"),
+                                   ZPROP_MAX_COMMENT);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+                       break;
+               case ZPOOL_PROP_READONLY:
+                       if (!flags.import) {
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "property '%s' can only be set at "
+                                   "import time"), propname);
+                               (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+                               goto error;
+                       }
+                       break;
                }
        }
 
@@ -548,12 +705,13 @@ error:
 int
 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int ret = -1;
        char errbuf[1024];
        nvlist_t *nvl = NULL;
        nvlist_t *realprops;
        uint64_t version;
+       prop_flags_t flags = { 0 };
 
        (void) snprintf(errbuf, sizeof (errbuf),
            dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
@@ -569,7 +727,7 @@ zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
 
        version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
        if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
-           zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
+           zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
                nvlist_free(nvl);
                return (-1);
        }
@@ -606,10 +764,79 @@ zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        zprop_list_t *entry;
        char buf[ZFS_MAXPROPLEN];
+       nvlist_t *features = NULL;
+       nvpair_t *nvp;
+       zprop_list_t **last;
+       boolean_t firstexpand = (NULL == *plp);
+       int i;
 
        if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
                return (-1);
 
+       last = plp;
+       while (*last != NULL)
+               last = &(*last)->pl_next;
+
+       if ((*plp)->pl_all)
+               features = zpool_get_features(zhp);
+
+       if ((*plp)->pl_all && firstexpand) {
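+               /* on the first expansion, append an entry per known feature */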
+               for (i = 0; i < SPA_FEATURES; i++) {
+                       zprop_list_t *entry = zfs_alloc(hdl,
+                           sizeof (zprop_list_t));
+                       entry->pl_prop = ZPROP_INVAL;
+                       entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
+                           spa_feature_table[i].fi_uname);
+                       entry->pl_width = strlen(entry->pl_user_prop);
+                       entry->pl_all = B_TRUE;
+
+                       *last = entry;
+                       last = &entry->pl_next;
+               }
+       }
+
+       /* add any unsupported features */
+       for (nvp = nvlist_next_nvpair(features, NULL);
+           nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
+               char *propname;
+               boolean_t found;
+               zprop_list_t *entry;
+
+               if (zfeature_is_supported(nvpair_name(nvp)))
+                       continue;
+
+               propname = zfs_asprintf(hdl, "unsupported@%s",
+                   nvpair_name(nvp));
+
+               /*
+                * Before adding the property to the list make sure that no
+                * other pool already added the same property.
+                */
+               found = B_FALSE;
+               entry = *plp;
+               while (entry != NULL) {
+                       if (entry->pl_user_prop != NULL &&
+                           strcmp(propname, entry->pl_user_prop) == 0) {
+                               found = B_TRUE;
+                               break;
+                       }
+                       entry = entry->pl_next;
+               }
+               if (found) {
+                       free(propname);
+                       continue;
+               }
+
+               entry = zfs_alloc(hdl, sizeof (zprop_list_t));
+               entry->pl_prop = ZPROP_INVAL;
+               entry->pl_user_prop = propname;
+               entry->pl_width = strlen(entry->pl_user_prop);
+               entry->pl_all = B_TRUE;
+
+               *last = entry;
+               last = &entry->pl_next;
+       }
+
        for (entry = *plp; entry != NULL; entry = entry->pl_next) {
 
                if (entry->pl_fixed)
@@ -626,6 +853,77 @@ zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
        return (0);
 }
 
+/*
+ * Get the state for the given feature on the given ZFS pool.
+ */
+int
+zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
+    size_t len)
+{
+       uint64_t refcount;
+       boolean_t found = B_FALSE;
+       nvlist_t *features = zpool_get_features(zhp);
+       boolean_t supported;
+       const char *feature = strchr(propname, '@') + 1;
+
+       supported = zpool_prop_feature(propname);
+       ASSERT(supported || zpool_prop_unsupported(propname));
+
+       /*
+        * Convert from feature name to feature guid. This conversion is
+        * unnecessary for unsupported@... properties because they already
+        * use guids.
+        */
+       if (supported) {
+               int ret;
+               zfeature_info_t *fi;
+
+               ret = zfeature_lookup_name(feature, &fi);
+               if (ret != 0) {
+                       (void) strlcpy(buf, "-", len);
+                       return (ENOTSUP);
+               }
+               feature = fi->fi_guid;
+       }
+
+       if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
+               found = B_TRUE;
+
+       if (supported) {
+               if (!found) {
+                       (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
+               } else  {
+                       if (refcount == 0)
+                               (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
+                       else
+                               (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
+               }
+       } else {
+               if (found) {
+                       if (refcount == 0) {
+                               (void) strlcpy(buf,
+                                   ZFS_UNSUPPORTED_INACTIVE, len);
+                       } else {
+                               (void) strlcpy(buf,
+                                   ZFS_UNSUPPORTED_READONLY, len);
+                       }
+               } else {
+                       (void) strlcpy(buf, "-", len);
+                       return (ENOTSUP);
+               }
+       }
+
+       return (0);
+}
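
A minimal caller sketch for the above (the feature name is illustrative; any
name known to zfeature_lookup_name() works):

	char state[16];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
	    sizeof (state)) == 0)
		(void) printf("%s\n", state);	/* disabled, enabled, or active */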
+
+/*
+ * Don't start the slice at the default block of 34; many storage
+ * devices use a stripe width of 128k, while other vendors prefer a
+ * 1m alignment.  It is best to play it safe and ensure a 1m alignment
+ * given 512B blocks (2048 x 512B = 1m).  When the block size is larger
+ * by a power of 2 we will still be 1m aligned.  Some devices are
+ * sensitive to the partition ending alignment as well.
+ */
+#define        NEW_START_BLOCK         2048
+#define        PARTITION_END_ALIGNMENT 2048
 
 /*
  * Validate the given pool name, optionally putting an extended error message in
@@ -706,7 +1004,10 @@ zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "multiple '@' delimiters in name"));
                                break;
-
+                       case NAME_ERR_NO_AT:
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "permission set is missing '@'"));
+                               break;
                        }
                }
                return (B_FALSE);
@@ -853,7 +1154,7 @@ int
 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
     nvlist_t *props, nvlist_t *fsprops)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        nvlist_t *zc_fsprops = NULL;
        nvlist_t *zc_props = NULL;
        char msg[1024];
@@ -870,8 +1171,10 @@ zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
                return (-1);
 
        if (props) {
+               prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
+
                if ((zc_props = zpool_valid_proplist(hdl, pool, props,
-                   SPA_VERSION_1, B_TRUE, msg)) == NULL) {
+                   SPA_VERSION_1, flags, msg)) == NULL) {
                        goto create_failed;
                }
        }
@@ -915,10 +1218,12 @@ zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
-                        * label.
+                        * label.  This can also happen if the device is
+                        * part of an active md or lvm device.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                           "one or more vdevs refer to the same device"));
+                           "one or more vdevs refer to the same device, or "
+                           "one of\nthe devices is part of an active md or "
+                           "lvm device"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));
 
                case EOVERFLOW:
@@ -983,22 +1288,18 @@ create_failed:
 int
 zpool_destroy(zpool_handle_t *zhp)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        zfs_handle_t *zfp = NULL;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];
 
        if (zhp->zpool_state == POOL_STATE_ACTIVE &&
-           (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
-           ZFS_TYPE_FILESYSTEM)) == NULL)
-               return (-1);
-
-       if (zpool_remove_zvol_links(zhp) != 0)
+           (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
                return (-1);
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
 
-       if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
+       if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot destroy '%s'"), zhp->zpool_name);
 
@@ -1030,7 +1331,7 @@ zpool_destroy(zpool_handle_t *zhp)
 int
 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int ret;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];
@@ -1049,7 +1350,8 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }
 
-       if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
+#if defined(__sun__) || defined(__sun)
+       if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
                uint64_t s;
 
@@ -1061,11 +1363,13 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device '%s' contains an EFI label and "
                                    "cannot be used on root pools."),
-                                   zpool_vdev_name(hdl, NULL, spares[s]));
+                                   zpool_vdev_name(hdl, NULL, spares[s],
+                                   B_FALSE));
                                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
                        }
                }
        }
+#endif
 
        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_L2CACHE &&
@@ -1080,7 +1384,7 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
                return (-1);
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
 
-       if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
+       if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
                switch (errno) {
                case EBUSY:
                        /*
@@ -1153,12 +1457,9 @@ zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
 int
 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
 
-       if (zpool_remove_zvol_links(zhp) != 0)
-               return (-1);
-
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot export '%s'"), zhp->zpool_name);
 
@@ -1197,6 +1498,138 @@ zpool_export_force(zpool_handle_t *zhp)
        return (zpool_export_common(zhp, B_TRUE, B_TRUE));
 }
 
+static void
+zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
+    nvlist_t *config)
+{
+       nvlist_t *nv = NULL;
+       uint64_t rewindto;
+       int64_t loss = -1;
+       struct tm t;
+       char timestr[128];
+
+       if (!hdl->libzfs_printerr || config == NULL)
+               return;
+
+       if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
+           nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
+               return;
+       }
+
+       if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
+               return;
+       (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
+
+       if (localtime_r((time_t *)&rewindto, &t) != NULL &&
+           strftime(timestr, 128, "%c", &t) != 0) {
+               if (dryrun) {
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "Would be able to return %s "
+                           "to its state as of %s.\n"),
+                           name, timestr);
+               } else {
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "Pool %s returned to its state as of %s.\n"),
+                           name, timestr);
+               }
+               if (loss > 120) {
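+                       /* loss is seconds; +30 rounds to nearest minute */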
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "%s approximately %lld "),
+                           dryrun ? "Would discard" : "Discarded",
+                           ((longlong_t)loss + 30) / 60);
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "minutes of transactions.\n"));
+               } else if (loss > 0) {
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "%s approximately %lld "),
+                           dryrun ? "Would discard" : "Discarded",
+                           (longlong_t)loss);
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "seconds of transactions.\n"));
+               }
+       }
+}
+
+void
+zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
+    nvlist_t *config)
+{
+       nvlist_t *nv = NULL;
+       int64_t loss = -1;
+       uint64_t edata = UINT64_MAX;
+       uint64_t rewindto;
+       struct tm t;
+       char timestr[128];
+
+       if (!hdl->libzfs_printerr)
+               return;
+
+       if (reason >= 0)
+               (void) printf(dgettext(TEXT_DOMAIN, "action: "));
+       else
+               (void) printf(dgettext(TEXT_DOMAIN, "\t"));
+
+       /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
+       if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
+           nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
+           nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
+               goto no_info;
+
+       (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
+       (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
+           &edata);
+
+       (void) printf(dgettext(TEXT_DOMAIN,
+           "Recovery is possible, but will result in some data loss.\n"));
+
+       if (localtime_r((time_t *)&rewindto, &t) != NULL &&
+           strftime(timestr, 128, "%c", &t) != 0) {
+               (void) printf(dgettext(TEXT_DOMAIN,
+                   "\tReturning the pool to its state as of %s\n"
+                   "\tshould correct the problem.  "),
+                   timestr);
+       } else {
+               (void) printf(dgettext(TEXT_DOMAIN,
+                   "\tReverting the pool to an earlier state "
+                   "should correct the problem.\n\t"));
+       }
+
+       if (loss > 120) {
+               (void) printf(dgettext(TEXT_DOMAIN,
+                   "Approximately %lld minutes of data\n"
+                   "\tmust be discarded, irreversibly.  "),
+                   ((longlong_t)loss + 30) / 60);
+       } else if (loss > 0) {
+               (void) printf(dgettext(TEXT_DOMAIN,
+                   "Approximately %lld seconds of data\n"
+                   "\tmust be discarded, irreversibly.  "),
+                   (longlong_t)loss);
+       }
+       if (edata != 0 && edata != UINT64_MAX) {
+               if (edata == 1) {
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "After rewind, at least\n"
+                           "\tone persistent user-data error will remain.  "));
+               } else {
+                       (void) printf(dgettext(TEXT_DOMAIN,
+                           "After rewind, several\n"
+                           "\tpersistent user-data errors will remain.  "));
+               }
+       }
+       (void) printf(dgettext(TEXT_DOMAIN,
+           "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
+           reason >= 0 ? "clear" : "import", name);
+
+       (void) printf(dgettext(TEXT_DOMAIN,
+           "A scrub of the pool\n"
+           "\tis strongly recommended after recovery.\n"));
+       return;
+
+no_info:
+       (void) printf(dgettext(TEXT_DOMAIN,
+           "Destroy and re-create the pool from\n\ta backup source.\n"));
+}
+
 /*
  * zpool_import() is a contracted interface. Should be kept the same
  * if possible.
@@ -1229,12 +1662,65 @@ zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
                }
        }
 
-       ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
+       ret = zpool_import_props(hdl, config, newname, props,
+           ZFS_IMPORT_NORMAL);
        if (props)
                nvlist_free(props);
        return (ret);
 }
 
+static void
+print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
+    int indent)
+{
+       nvlist_t **child;
+       uint_t c, children;
+       char *vname;
+       uint64_t is_log = 0;
+
+       (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
+           &is_log);
+
+       if (name != NULL)
+               (void) printf("\t%*s%s%s\n", indent, "", name,
+                   is_log ? " [log]" : "");
+
+       if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+           &child, &children) != 0)
+               return;
+
+       for (c = 0; c < children; c++) {
+               vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
+               print_vdev_tree(hdl, vname, child[c], indent + 2);
+               free(vname);
+       }
+}
+
+void
+zpool_print_unsup_feat(nvlist_t *config)
+{
+       nvlist_t *nvinfo, *unsup_feat;
+       nvpair_t *nvp;
+
+       verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
+           0);
+       verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
+           &unsup_feat) == 0);
+
+       for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
+           nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
+               char *desc;
+
+               verify(nvpair_type(nvp) == DATA_TYPE_STRING);
+               verify(nvpair_value_string(nvp, &desc) == 0);
+
+               if (strlen(desc) > 0)
+                       (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
+               else
+                       (void) printf("\t%s\n", nvpair_name(nvp));
+       }
+}
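
For illustration only, the printed list might read as follows (the feature
GUID and description here are hypothetical):

	com.example:async_frobnicate (Frobnicates blocks asynchronously.)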
+
 /*
  * Import the given pool using the known configuration and a list of
  * properties to be set. The configuration should have come from
@@ -1243,12 +1729,17 @@ zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
  */
 int
 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
-    nvlist_t *props, boolean_t importfaulted)
+    nvlist_t *props, int flags)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
+       zpool_rewind_policy_t policy;
+       nvlist_t *nv = NULL;
+       nvlist_t *nvinfo = NULL;
+       nvlist_t *missing = NULL;
        char *thename;
        char *origname;
        int ret;
+       int error = 0;
        char errbuf[1024];
 
        verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
@@ -1269,12 +1760,13 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
 
        if (props) {
                uint64_t version;
+               prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
 
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
                    &version) == 0);
 
                if ((props = zpool_valid_proplist(hdl, origname,
-                   props, version, B_TRUE, errbuf)) == NULL) {
+                   props, version, flags, errbuf)) == NULL) {
                        return (-1);
                } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
                        nvlist_free(props);
@@ -1291,11 +1783,39 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
                nvlist_free(props);
                return (-1);
        }
+       if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
+               nvlist_free(props);
+               return (-1);
+       }
+
+       zc.zc_cookie = flags;
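+       /* retry with a larger buffer until the returned config fits */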
+       while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
+           errno == ENOMEM) {
+               if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+                       zcmd_free_nvlists(&zc);
+                       return (-1);
+               }
+       }
+       if (ret != 0)
+               error = errno;
+
+       (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
+       zpool_get_rewind_policy(config, &policy);
 
-       zc.zc_cookie = (uint64_t)importfaulted;
-       ret = 0;
-       if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
+       if (error) {
                char desc[1024];
+
+               /*
+                * Dry-run failed, but we print out what success
+                * looks like if we found a best txg
+                */
+               if (policy.zrp_request & ZPOOL_TRY_REWIND) {
+                       zpool_rewind_exclaim(hdl, newname ? origname : thename,
+                           B_TRUE, nv);
+                       nvlist_free(nv);
+                       return (-1);
+               }
+
                if (newname == NULL)
                        (void) snprintf(desc, sizeof (desc),
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
@@ -1305,8 +1825,24 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
                            dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                            origname, thename);
 
-               switch (errno) {
+               switch (error) {
                case ENOTSUP:
+                       if (nv != NULL && nvlist_lookup_nvlist(nv,
+                           ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
+                           nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
+                               (void) printf(dgettext(TEXT_DOMAIN, "This "
+                                   "pool uses the following feature(s) not "
+                                   "supported by this system:\n"));
+                               zpool_print_unsup_feat(nv);
+                               if (nvlist_exists(nvinfo,
+                                   ZPOOL_CONFIG_CAN_RDONLY)) {
+                                       (void) printf(dgettext(TEXT_DOMAIN,
+                                           "All unsupported features are only "
+                                           "required for writing to the pool."
+                                           "\nThe pool can be imported using "
+                                           "'-o readonly=on'.\n"));
+                               }
+                       }
                        /*
                         * Unsupported version.
                         */
@@ -1317,10 +1853,44 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
                        (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
                        break;
 
+               case EROFS:
+                       zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                           "one or more devices are read only"));
+                       (void) zfs_error(hdl, EZFS_BADDEV, desc);
+                       break;
+
+               case ENXIO:
+                       if (nv && nvlist_lookup_nvlist(nv,
+                           ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
+                           nvlist_lookup_nvlist(nvinfo,
+                           ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
+                               (void) printf(dgettext(TEXT_DOMAIN,
+                                   "The devices below are missing, use "
+                                   "'-m' to import the pool anyway:\n"));
+                               print_vdev_tree(hdl, NULL, missing, 2);
+                               (void) printf("\n");
+                       }
+                       (void) zpool_standard_error(hdl, error, desc);
+                       break;
+
+               case EEXIST:
+                       (void) zpool_standard_error(hdl, error, desc);
+                       break;
+
+               case EBUSY:
+                       zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                           "one or more devices are already in use\n"));
+                       (void) zfs_error(hdl, EZFS_BADDEV, desc);
+                       break;
+
                default:
-                       (void) zpool_standard_error(hdl, errno, desc);
+                       (void) zpool_standard_error(hdl, error, desc);
+                       zpool_explain_recover(hdl,
+                           newname ? origname : thename, -error, nv);
+                       break;
                }
 
+               nvlist_free(nv);
                ret = -1;
        } else {
                zpool_handle_t *zhp;
@@ -1328,13 +1898,17 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
                /*
                 * This should never fail, but play it safe anyway.
                 */
-               if (zpool_open_silent(hdl, thename, &zhp) != 0) {
+               if (zpool_open_silent(hdl, thename, &zhp) != 0)
                        ret = -1;
-               } else if (zhp != NULL) {
-                       ret = zpool_create_zvol_links(zhp);
+               else if (zhp != NULL)
                        zpool_close(zhp);
+               if (policy.zrp_request &
+                   (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
+                       zpool_rewind_exclaim(hdl, newname ? origname : thename,
+                           ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
                }
-
+               nvlist_free(nv);
+               return (0);
        }
 
        zcmd_free_nvlists(&zc);
@@ -1344,71 +1918,175 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
 }
 
 /*
- * Scrub the pool.
+ * Scan the pool.
  */
 int
-zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
+zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
-       zc.zc_cookie = type;
+       zc.zc_cookie = func;
 
-       if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
+       if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
+           (errno == ENOENT && func != POOL_SCAN_NONE))
                return (0);
 
-       (void) snprintf(msg, sizeof (msg),
-           dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
+       if (func == POOL_SCAN_SCRUB) {
+               (void) snprintf(msg, sizeof (msg),
+                   dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
+       } else if (func == POOL_SCAN_NONE) {
+               (void) snprintf(msg, sizeof (msg),
+                   dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
+                   zc.zc_name);
+       } else {
+               assert(!"unexpected result");
+       }
 
-       if (errno == EBUSY)
-               return (zfs_error(hdl, EZFS_RESILVERING, msg));
-       else
+       if (errno == EBUSY) {
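+               /* EBUSY: a scan is already running; report which kind */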
+               nvlist_t *nvroot;
+               pool_scan_stat_t *ps = NULL;
+               uint_t psc;
+
+               verify(nvlist_lookup_nvlist(zhp->zpool_config,
+                   ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+               (void) nvlist_lookup_uint64_array(nvroot,
+                   ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
+               if (ps && ps->pss_func == POOL_SCAN_SCRUB)
+                       return (zfs_error(hdl, EZFS_SCRUBBING, msg));
+               else
+                       return (zfs_error(hdl, EZFS_RESILVERING, msg));
+       } else if (errno == ENOENT) {
+               return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
+       } else {
                return (zpool_standard_error(hdl, errno, msg));
+       }
 }
 
 /*
+ * Find a vdev that matches the search criteria specified. We use
+ * the nvpair name to determine how we should look for the device.
  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
  * spare; but FALSE if its an INUSE spare.
  */
 static nvlist_t *
-vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
-    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
+vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
+    boolean_t *l2cache, boolean_t *log)
 {
        uint_t c, children;
        nvlist_t **child;
-       uint64_t theguid, present;
-       char *path;
-       uint64_t wholedisk = 0;
        nvlist_t *ret;
        uint64_t is_log;
+       char *srchkey;
+       nvpair_t *pair = nvlist_next_nvpair(search, NULL);
 
-       verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
+       /* Nothing to look for */
+       if (search == NULL || pair == NULL)
+               return (NULL);
 
-       if (search == NULL &&
-           nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
-               /*
-                * If the device has never been present since import, the only
-                * reliable way to match the vdev is by GUID.
+       /* Obtain the key we will use to search */
+       srchkey = nvpair_name(pair);
+
+       switch (nvpair_type(pair)) {
+       case DATA_TYPE_UINT64:
+               if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
+                       uint64_t srchval, theguid;
+
+                       verify(nvpair_value_uint64(pair, &srchval) == 0);
+                       verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+                           &theguid) == 0);
+                       if (theguid == srchval)
+                               return (nv);
+               }
+               break;
+
+       case DATA_TYPE_STRING: {
+               char *srchval, *val;
+
+               verify(nvpair_value_string(pair, &srchval) == 0);
+               if (nvlist_lookup_string(nv, srchkey, &val) != 0)
+                       break;
+
+               /*
+                * Search for the requested value. Special cases:
+                *
+                * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
+                *   "-part1" or "p1".  The suffix is hidden from the user,
+                *   but included in the string, so this matches around it.
+                * - ZPOOL_CONFIG_PATH for short names; zfs_strcmp_shortname()
+                *   is used to check all possible expanded paths.
+                * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
+                *
+                * Otherwise, all other searches are simple string compares.
                 */
-               if (theguid == guid)
-                       return (nv);
-       } else if (search != NULL &&
-           nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
-               (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
-                   &wholedisk);
-               if (wholedisk) {
+               if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
+                       uint64_t wholedisk = 0;
+
+                       (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+                           &wholedisk);
+                       if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
+                               return (nv);
+
+               } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
+                       char *type, *idx, *end, *p;
+                       uint64_t id, vdev_id;
+
+                       /*
+                        * Determine our vdev type, keeping in mind
+                        * that the srchval is composed of a type and
+                        * vdev id pair (i.e. mirror-4).
+                        */
+                       if ((type = strdup(srchval)) == NULL)
+                               return (NULL);
+
+                       if ((p = strrchr(type, '-')) == NULL) {
+                               free(type);
+                               break;
+                       }
+                       idx = p + 1;
+                       *p = '\0';
+
+                       /*
+                        * If the types don't match then keep looking.
+                        */
+                       if (strncmp(val, type, strlen(val)) != 0) {
+                               free(type);
+                               break;
+                       }
+
+                       verify(strncmp(type, VDEV_TYPE_RAIDZ,
+                           strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+                           strncmp(type, VDEV_TYPE_MIRROR,
+                           strlen(VDEV_TYPE_MIRROR)) == 0);
+                       verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
+                           &id) == 0);
+
+                       errno = 0;
+                       vdev_id = strtoull(idx, &end, 10);
+
+                       free(type);
+                       if (errno != 0)
+                               return (NULL);
+
                        /*
-                        * For whole disks, the internal path has 's0', but the
-                        * path passed in by the user doesn't.
+                        * Now verify that we have the correct vdev id.
                         */
-                       if (strlen(search) == strlen(path) - 2 &&
-                           strncmp(search, path, strlen(search)) == 0)
+                       if (vdev_id == id)
                                return (nv);
-               } else if (strcmp(search, path) == 0) {
-                       return (nv);
                }
+
+               /*
+                * Common case
+                */
+               if (strcmp(srchval, val) == 0)
+                       return (nv);
+               break;
+       }
+
+       default:
+               break;
        }
 
        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
@@ -1416,7 +2094,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
                return (NULL);
 
        for (c = 0; c < children; c++) {
-               if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
+               if ((ret = vdev_to_nvlist_iter(child[c], search,
                    avail_spare, l2cache, NULL)) != NULL) {
                        /*
                         * The 'is_log' value is only set for the toplevel
@@ -1437,7 +2115,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
            &child, &children) == 0) {
                for (c = 0; c < children; c++) {
-                       if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
+                       if ((ret = vdev_to_nvlist_iter(child[c], search,
                            avail_spare, l2cache, NULL)) != NULL) {
                                *avail_spare = B_TRUE;
                                return (ret);
@@ -1448,7 +2126,7 @@ vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
            &child, &children) == 0) {
                for (c = 0; c < children; c++) {
-                       if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
+                       if ((ret = vdev_to_nvlist_iter(child[c], search,
                            avail_spare, l2cache, NULL)) != NULL) {
                                *l2cache = B_TRUE;
                                return (ret);
@@ -1459,24 +2137,61 @@ vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
        return (NULL);
 }
 
+/*
+ * Given a physical path (minus the "/devices" prefix), find the
+ * associated vdev.
+ */
+nvlist_t *
+zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
+    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
+{
+       nvlist_t *search, *nvroot, *ret;
+
+       verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+       verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
+
+       verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+           &nvroot) == 0);
+
+       *avail_spare = B_FALSE;
+       *l2cache = B_FALSE;
+       if (log != NULL)
+               *log = B_FALSE;
+       ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
+       nvlist_free(search);
+
+       return (ret);
+}
+
+/*
+ * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
+ */
+boolean_t
+zpool_vdev_is_interior(const char *name)
+{
+       if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+           strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
+               return (B_TRUE);
+       return (B_FALSE);
+}
+
 nvlist_t *
 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
     boolean_t *l2cache, boolean_t *log)
 {
-       char buf[MAXPATHLEN];
-       const char *search;
        char *end;
-       nvlist_t *nvroot;
+       nvlist_t *nvroot, *search, *ret;
        uint64_t guid;
 
+       verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+
        guid = strtoull(path, &end, 10);
        if (guid != 0 && *end == '\0') {
-               search = NULL;
-       } else if (path[0] != '/') {
-               (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
-               search = buf;
+               verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
+       } else if (zpool_vdev_is_interior(path)) {
+               verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
        } else {
-               search = path;
+               verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
        }
 
        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
@@ -1486,8 +2201,10 @@ zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
        *l2cache = B_FALSE;
        if (log != NULL)
                *log = B_FALSE;
-       return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
-           l2cache, log));
+       ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
+       nvlist_free(search);
+
+       return (ret);
 }
 
 static int
@@ -1504,106 +2221,174 @@ vdev_online(nvlist_t *nv)
 }
 
 /*
- * Get phys_path for a root pool
- * Return 0 on success; non-zeron on failure.
+ * Helper function for zpool_get_physpaths().
  */
-int
-zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
+static int
+vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
+    size_t *bytes_written)
+{
+       size_t bytes_left, pos, rsz;
+       char *tmppath;
+       const char *format;
+
+       if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
+           &tmppath) != 0)
+               return (EZFS_NODEVICE);
+
+       pos = *bytes_written;
+       bytes_left = physpath_size - pos;
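+       /* separate multiple paths with a single space */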
+       format = (pos == 0) ? "%s" : " %s";
+
+       rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
+       *bytes_written += rsz;
+
+       if (rsz >= bytes_left) {
+               /* if physpath was not copied properly, clear it */
+               if (bytes_left != 0) {
+                       physpath[pos] = 0;
+               }
+               return (EZFS_NOSPC);
+       }
+       return (0);
+}
+
+static int
+vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
+    size_t *rsz, boolean_t is_spare)
 {
+       char *type;
+       int ret;
+
+       if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
+               return (EZFS_INVALCONFIG);
+
+       if (strcmp(type, VDEV_TYPE_DISK) == 0) {
+               /*
+                * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
+                * For a spare vdev, we only want to boot from the active
+                * spare device.
+                */
+               if (is_spare) {
+                       uint64_t spare = 0;
+                       (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
+                           &spare);
+                       if (!spare)
+                               return (EZFS_INVALCONFIG);
+               }
+
+               if (vdev_online(nv)) {
+                       if ((ret = vdev_get_one_physpath(nv, physpath,
+                           phypath_size, rsz)) != 0)
+                               return (ret);
+               }
+       } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
+           strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
+           (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
+               nvlist_t **child;
+               uint_t count;
+               int i, ret;
+
+               if (nvlist_lookup_nvlist_array(nv,
+                   ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
+                       return (EZFS_INVALCONFIG);
+
+               for (i = 0; i < count; i++) {
+                       ret = vdev_get_physpaths(child[i], physpath,
+                           phypath_size, rsz, is_spare);
+                       if (ret == EZFS_NOSPC)
+                               return (ret);
+               }
+       }
+
+       return (EZFS_POOL_INVALARG);
+}
+
+/*
+ * Get phys_path for a root pool config.
+ * Return 0 on success; non-zero on failure.
+ */
+static int
+zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
+{
+       size_t rsz;
        nvlist_t *vdev_root;
        nvlist_t **child;
        uint_t count;
-       int i;
+       char *type;
 
-       /*
-        * Make sure this is a root pool, as phys_path doesn't mean
-        * anything to a non-root pool.
-        */
-       if (!pool_is_bootable(zhp))
-               return (-1);
+       rsz = 0;
 
-       verify(nvlist_lookup_nvlist(zhp->zpool_config,
-           ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
+       if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+           &vdev_root) != 0)
+               return (EZFS_INVALCONFIG);
 
-       if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
+       if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
+           nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
            &child, &count) != 0)
-               return (-2);
+               return (EZFS_INVALCONFIG);
 
-       for (i = 0; i < count; i++) {
-               nvlist_t **child2;
-               uint_t count2;
-               char *type;
-               char *tmppath;
-               int j;
+#if defined(__sun__) || defined(__sun)
+       /*
+        * A root pool cannot have EFI labeled disks and can only have
+        * a single top-level vdev.
+        */
+       if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
+           pool_uses_efi(vdev_root))
+               return (EZFS_POOL_INVALARG);
+#endif
 
-               if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
-                   != 0)
-                       return (-3);
+       (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
+           B_FALSE);
 
-               if (strcmp(type, VDEV_TYPE_DISK) == 0) {
-                       if (!vdev_online(child[i]))
-                               return (-8);
-                       verify(nvlist_lookup_string(child[i],
-                           ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
-                       (void) strncpy(physpath, tmppath, strlen(tmppath));
-               } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
-                       if (nvlist_lookup_nvlist_array(child[i],
-                           ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
-                               return (-4);
-
-                       for (j = 0; j < count2; j++) {
-                               if (!vdev_online(child2[j]))
-                                       return (-8);
-                               if (nvlist_lookup_string(child2[j],
-                                   ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
-                                       return (-5);
-
-                               if ((strlen(physpath) + strlen(tmppath)) >
-                                   MAXNAMELEN)
-                                       return (-6);
-
-                               if (strlen(physpath) == 0) {
-                                       (void) strncpy(physpath, tmppath,
-                                           strlen(tmppath));
-                               } else {
-                                       (void) strcat(physpath, " ");
-                                       (void) strcat(physpath, tmppath);
-                               }
-                       }
-               } else {
-                       return (-7);
-               }
-       }
+       /* No online devices */
+       if (rsz == 0)
+               return (EZFS_NODEVICE);
 
        return (0);
 }
 
 /*
- * Returns TRUE if the given guid corresponds to the given type.
- * This is used to check for hot spares (INUSE or not), and level 2 cache
- * devices.
+ * Get phys_path for a root pool
+ * Return 0 on success; non-zero on failure.
  */
-static boolean_t
-is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
+int
+zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
 {
-       uint64_t target_guid;
-       nvlist_t *nvroot;
-       nvlist_t **list;
-       uint_t count;
-       int i;
+       return (zpool_get_config_physpath(zhp->zpool_config, physpath,
+           phypath_size));
+}
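
A hedged usage sketch for the widened interface, which now takes an explicit buffer size instead of assuming the caller's allocation; the pool name "rpool" is an illustrative assumption:

    #include <sys/param.h>
    #include <libzfs.h>
    #include <stdio.h>

    int
    main(void)
    {
            libzfs_handle_t *hdl = libzfs_init();
            zpool_handle_t *zhp;
            char physpath[MAXPATHLEN] = { 0 };

            if (hdl == NULL)
                    return (1);
            /* "rpool" is a placeholder for any imported root pool. */
            if ((zhp = zpool_open(hdl, "rpool")) == NULL) {
                    libzfs_fini(hdl);
                    return (1);
            }
            /* On success the space-separated phys_paths land in the buffer. */
            if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
                    (void) printf("%s\n", physpath);
            zpool_close(zhp);
            libzfs_fini(hdl);
            return (0);
    }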
 
-       verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
-           &nvroot) == 0);
-       if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
-               for (i = 0; i < count; i++) {
-                       verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
-                           &target_guid) == 0);
-                       if (guid == target_guid)
-                               return (B_TRUE);
-               }
+/*
+ * If the device has been dynamically expanded then we need to relabel
+ * the disk to use the new unallocated space.
+ */
+static int
+zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
+{
+       int fd, error;
+
+       if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
+                   "relabel '%s': unable to open device: %d"), path, errno);
+               return (zfs_error(hdl, EZFS_OPENFAILED, msg));
        }
 
-       return (B_FALSE);
+       /*
+        * It's possible that we might encounter an error if the device
+        * does not have any unallocated space left. If so, we simply
+        * ignore that error and continue on.
+        *
+        * Also, we don't call efi_rescan() - that would just return EBUSY.
+        * The module will do it for us in vdev_disk_open().
+        */
+       error = efi_use_whole_disk(fd);
+       (void) close(fd);
+       if (error && error != VT_ENOSPC) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
+                   "relabel '%s': unable to read disk capacity"), path);
+               return (zfs_error(hdl, EZFS_NOCAP, msg));
+       }
+       return (0);
 }
 
 /*
@@ -1614,31 +2399,79 @@ int
 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
     vdev_state_t *newstate)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        nvlist_t *tgt;
-       boolean_t avail_spare, l2cache;
+       boolean_t avail_spare, l2cache, islog;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
+       int error;
 
-       (void) snprintf(msg, sizeof (msg),
-           dgettext(TEXT_DOMAIN, "cannot online %s"), path);
+       if (flags & ZFS_ONLINE_EXPAND) {
+               (void) snprintf(msg, sizeof (msg),
+                   dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
+       } else {
+               (void) snprintf(msg, sizeof (msg),
+                   dgettext(TEXT_DOMAIN, "cannot online %s"), path);
+       }
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
-           NULL)) == NULL)
+           &islog)) == NULL)
                return (zfs_error(hdl, EZFS_NODEVICE, msg));
 
        verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
 
-       if (avail_spare ||
-           is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
+       if (avail_spare)
                return (zfs_error(hdl, EZFS_ISSPARE, msg));
 
+       if (flags & ZFS_ONLINE_EXPAND ||
+           zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
+               uint64_t wholedisk = 0;
+
+               (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
+                   &wholedisk);
+
+               /*
+                * XXX - L2ARC 1.0 devices can't support expansion.
+                */
+               if (l2cache) {
+                       zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                           "cannot expand cache devices"));
+                       return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
+               }
+
+               if (wholedisk) {
+                       const char *fullpath = path;
+                       char buf[MAXPATHLEN];
+
+                       if (path[0] != '/') {
+                               error = zfs_resolve_shortname(path, buf,
+                                   sizeof (buf));
+                               if (error != 0)
+                                       return (zfs_error(hdl, EZFS_NODEVICE,
+                                           msg));
+
+                               fullpath = buf;
+                       }
+
+                       error = zpool_relabel_disk(hdl, fullpath, msg);
+                       if (error != 0)
+                               return (error);
+               }
+       }
+
        zc.zc_cookie = VDEV_STATE_ONLINE;
        zc.zc_obj = flags;
 
-       if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
+       if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
+               if (errno == EINVAL) {
+                       zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
+                           "from this pool into a new one.  Use '%s' "
+                           "instead"), "zpool detach");
+                       return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
+               }
                return (zpool_standard_error(hdl, errno, msg));
+       }
 
        *newstate = zc.zc_cookie;
        return (0);
@@ -1650,7 +2483,7 @@ zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
 int
 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        nvlist_t *tgt;
        boolean_t avail_spare, l2cache;
@@ -1666,14 +2499,13 @@ zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
 
        verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
 
-       if (avail_spare ||
-           is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
+       if (avail_spare)
                return (zfs_error(hdl, EZFS_ISSPARE, msg));
 
        zc.zc_cookie = VDEV_STATE_OFFLINE;
        zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
 
-       if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+       if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
                return (0);
 
        switch (errno) {
@@ -1684,6 +2516,12 @@ zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
                 */
                return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
 
+       case EEXIST:
+               /*
+                * The log device has unplayed logs
+                */
+               return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
+
        default:
                return (zpool_standard_error(hdl, errno, msg));
        }
@@ -1693,20 +2531,21 @@ zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
  * Mark the given vdev faulted.
  */
 int
-zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
+zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) snprintf(msg, sizeof (msg),
-           dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
+           dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_guid = guid;
        zc.zc_cookie = VDEV_STATE_FAULTED;
+       zc.zc_obj = aux;
 
-       if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+       if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
                return (0);
 
        switch (errno) {
@@ -1727,20 +2566,21 @@ zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
  * Mark the given vdev degraded.
  */
 int
-zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
+zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) snprintf(msg, sizeof (msg),
-           dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
+           dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_guid = guid;
        zc.zc_cookie = VDEV_STATE_DEGRADED;
+       zc.zc_obj = aux;
 
-       if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+       if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
                return (0);
 
        return (zpool_standard_error(hdl, errno, msg));
@@ -1782,18 +2622,18 @@ int
 zpool_vdev_attach(zpool_handle_t *zhp,
     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        int ret;
        nvlist_t *tgt;
        boolean_t avail_spare, l2cache, islog;
        uint64_t val;
-       char *path, *newname;
+       char *newname;
        nvlist_t **child;
        uint_t children;
        nvlist_t *config_root;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
-       boolean_t rootpool = pool_is_bootable(zhp);
+       boolean_t rootpool = zpool_is_bootable(zhp);
 
        if (replacing)
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
@@ -1802,6 +2642,7 @@ zpool_vdev_attach(zpool_handle_t *zhp,
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot attach %s to %s"), new_disk, old_disk);
 
+#if defined(__sun__) || defined(__sun)
        /*
         * If this is a root pool, make sure that we're not attaching an
         * EFI labeled device.
@@ -1811,6 +2652,7 @@ zpool_vdev_attach(zpool_handle_t *zhp,
                    "EFI labeled devices are not supported on root pools."));
                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
        }
+#endif
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
@@ -1836,7 +2678,7 @@ zpool_vdev_attach(zpool_handle_t *zhp,
        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
            ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
 
-       if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
+       if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
                return (-1);
 
        /*
@@ -1854,40 +2696,24 @@ zpool_vdev_attach(zpool_handle_t *zhp,
                return (zfs_error(hdl, EZFS_BADTARGET, msg));
        }
 
-       /*
-        * If we are attempting to replace a spare, it canot be applied to an
-        * already spared device.
-        */
-       if (replacing &&
-           nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
-           zpool_find_vdev(zhp, newname, &avail_spare,
-           &l2cache, NULL) != NULL && avail_spare &&
-           is_replacing_spare(config_root, tgt, 0)) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "device has already been replaced with a spare"));
-               free(newname);
-               return (zfs_error(hdl, EZFS_BADTARGET, msg));
-       }
-
        free(newname);
 
        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);
 
-       ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
+       ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
 
        zcmd_free_nvlists(&zc);
 
        if (ret == 0) {
                if (rootpool) {
                        /*
-                        * XXX - This should be removed once we can
-                        * automatically install the bootblocks on the
-                        * newly attached disk.
+                        * XXX need a better way to prevent user from
+                        * booting up a half-baked vdev.
                         */
-                       (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
-                           "be sure to invoke %s to make '%s' bootable.\n"),
-                           BOOTCMD, new_disk);
+                       (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
+                           "sure to wait until resilver is done "
+                           "before rebooting.\n"));
                }
                return (0);
        }
@@ -1898,9 +2724,16 @@ zpool_vdev_attach(zpool_handle_t *zhp,
                 * Can't attach to or replace this type of vdev.
                 */
                if (replacing) {
+                       uint64_t version = zpool_get_prop_int(zhp,
+                           ZPOOL_PROP_VERSION, NULL);
+
                        if (islog)
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "cannot replace a log with a spare"));
+                       else if (version >= SPA_VERSION_MULTI_REPLACE)
+                               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                                   "already in replacing/spare config; wait "
+                                   "for completion or use 'zpool detach'"));
                        else
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "cannot replace a replacing device"));
@@ -1965,7 +2798,7 @@ zpool_vdev_attach(zpool_handle_t *zhp,
 int
 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        nvlist_t *tgt;
        boolean_t avail_spare, l2cache;
@@ -1998,7 +2831,7 @@ zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
                 */
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
                    "applicable to mirror and replacing vdevs"));
-               (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
+               (void) zfs_error(hdl, EZFS_BADTARGET, msg);
                break;
 
        case EBUSY:
@@ -2008,11 +2841,263 @@ zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
                (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
                break;
 
-       default:
-               (void) zpool_standard_error(hdl, errno, msg);
+       default:
+               (void) zpool_standard_error(hdl, errno, msg);
+       }
+
+       return (-1);
+}
+
+/*
+ * Find a mirror vdev in the source nvlist.
+ *
+ * The mchild array contains a list of disks in one of the top-level mirrors
+ * of the source pool.  The schild array contains a list of disks that the
+ * user specified on the command line.  We loop over the mchild array to
+ * see if any entry in the schild array matches.
+ *
+ * If a disk in the mchild array is found in the schild array, we return
+ * the index of that entry.  Otherwise we return -1.
+ */
+static int
+find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
+    nvlist_t **schild, uint_t schildren)
+{
+       uint_t mc;
+
+       for (mc = 0; mc < mchildren; mc++) {
+               uint_t sc;
+               char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
+                   mchild[mc], B_FALSE);
+
+               for (sc = 0; sc < schildren; sc++) {
+                       char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
+                           schild[sc], B_FALSE);
+                       boolean_t result = (strcmp(mpath, spath) == 0);
+
+                       free(spath);
+                       if (result) {
+                               free(mpath);
+                               return (mc);
+                       }
+               }
+
+               free(mpath);
+       }
+
+       return (-1);
+}
+
+/*
+ * Split a mirror pool.  If newroot points to null, then a new nvlist
+ * is generated and it is the responsibility of the caller to free it.
+ */
+int
+zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
+    nvlist_t *props, splitflags_t flags)
+{
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
+       char msg[1024];
+       nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
+       nvlist_t **varray = NULL, *zc_props = NULL;
+       uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
+       libzfs_handle_t *hdl = zhp->zpool_hdl;
+       uint64_t vers;
+       boolean_t freelist = B_FALSE, memory_err = B_TRUE;
+       int retval = 0;
+
+       (void) snprintf(msg, sizeof (msg),
+           dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
+
+       if (!zpool_name_valid(hdl, B_FALSE, newname))
+               return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
+
+       if ((config = zpool_get_config(zhp, NULL)) == NULL) {
+               (void) fprintf(stderr, gettext("Internal error: unable to "
+                   "retrieve pool configuration\n"));
+               return (-1);
+       }
+
+       verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
+           == 0);
+       verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
+
+       if (props) {
+               prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
+               if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
+                   props, vers, flags, msg)) == NULL)
+                       return (-1);
+       }
+
+       if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
+           &children) != 0) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                   "Source pool is missing vdev tree"));
+               if (zc_props)
+                       nvlist_free(zc_props);
+               return (-1);
+       }
+
+       varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
+       vcount = 0;
+
+       if (*newroot == NULL ||
+           nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
+           &newchild, &newchildren) != 0)
+               newchildren = 0;
+
+       for (c = 0; c < children; c++) {
+               uint64_t is_log = B_FALSE, is_hole = B_FALSE;
+               char *type;
+               nvlist_t **mchild, *vdev;
+               uint_t mchildren;
+               int entry;
+
+               /*
+                * Unlike cache & spares, slogs are stored in the
+                * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
+                */
+               (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+                   &is_log);
+               (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
+                   &is_hole);
+               if (is_log || is_hole) {
+                       /*
+                        * Create a hole vdev and put it in the config.
+                        */
+                       if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
+                               goto out;
+                       if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
+                           VDEV_TYPE_HOLE) != 0)
+                               goto out;
+                       if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
+                           1) != 0)
+                               goto out;
+                       if (lastlog == 0)
+                               lastlog = vcount;
+                       varray[vcount++] = vdev;
+                       continue;
+               }
+               lastlog = 0;
+               verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
+                   == 0);
+               if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
+                       zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                           "Source pool must be composed only of mirrors\n"));
+                       retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
+                       goto out;
+               }
+
+               verify(nvlist_lookup_nvlist_array(child[c],
+                   ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
+
+               /* find or add an entry for this top-level vdev */
+               if (newchildren > 0 &&
+                   (entry = find_vdev_entry(zhp, mchild, mchildren,
+                   newchild, newchildren)) >= 0) {
+                       /* We found a disk that the user specified. */
+                       vdev = mchild[entry];
+                       ++found;
+               } else {
+                       /* User didn't specify a disk for this vdev. */
+                       vdev = mchild[mchildren - 1];
+               }
+
+               if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
+                       goto out;
+       }
+
+       /* did we find every disk the user specified? */
+       if (found != newchildren) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
+                   "include at most one disk from each mirror"));
+               retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
+               goto out;
+       }
+
+       /* Prepare the nvlist for populating. */
+       if (*newroot == NULL) {
+               if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
+                       goto out;
+               freelist = B_TRUE;
+               if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
+                   VDEV_TYPE_ROOT) != 0)
+                       goto out;
+       } else {
+               verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
+       }
+
+       /* Add all the children we found */
+       if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
+           lastlog == 0 ? vcount : lastlog) != 0)
+               goto out;
+
+       /*
+        * If we're just doing a dry run, exit now with success.
+        */
+       if (flags.dryrun) {
+               memory_err = B_FALSE;
+               freelist = B_FALSE;
+               goto out;
+       }
+
+       /* now build up the config list & call the ioctl */
+       if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
+               goto out;
+
+       if (nvlist_add_nvlist(newconfig,
+           ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
+           nvlist_add_string(newconfig,
+           ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
+           nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
+               goto out;
+
+       /*
+        * The new pool is automatically part of the namespace unless we
+        * explicitly export it.
+        */
+       if (!flags.import)
+               zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
+       (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+       (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
+       if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
+               goto out;
+       if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
+               goto out;
+
+       if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
+               retval = zpool_standard_error(hdl, errno, msg);
+               goto out;
+       }
+
+       freelist = B_FALSE;
+       memory_err = B_FALSE;
+
+out:
+       if (varray != NULL) {
+               int v;
+
+               for (v = 0; v < vcount; v++)
+                       nvlist_free(varray[v]);
+               free(varray);
+       }
+       zcmd_free_nvlists(&zc);
+       if (zc_props)
+               nvlist_free(zc_props);
+       if (newconfig)
+               nvlist_free(newconfig);
+       if (freelist) {
+               nvlist_free(*newroot);
+               *newroot = NULL;
        }
 
-       return (-1);
+       if (retval != 0)
+               return (retval);
+
+       if (memory_err)
+               return (no_memory(hdl));
+
+       return (0);
 }
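
A hedged sketch of the calling convention: with *newroot == NULL and flags.dryrun set, the routine only validates the split and hands back the generated vdev tree, which the caller must free. The pool names are illustrative assumptions:

    #include <libzfs.h>

    static int
    try_split(libzfs_handle_t *hdl)
    {
            zpool_handle_t *zhp;
            nvlist_t *newroot = NULL;
            splitflags_t flags = { 0 };
            char newname[] = "tank2";
            int err;

            flags.dryrun = 1;       /* validate only, commit nothing */
            flags.import = 0;       /* a real split would export the new pool */

            if ((zhp = zpool_open(hdl, "tank")) == NULL)
                    return (-1);
            err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
            if (newroot != NULL)
                    nvlist_free(newroot);   /* caller owns the generated tree */
            zpool_close(zhp);
            return (err);
    }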
 
 /*
@@ -2022,27 +3107,37 @@ zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
 int
 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        nvlist_t *tgt;
-       boolean_t avail_spare, l2cache;
+       boolean_t avail_spare, l2cache, islog;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
+       uint64_t version;
 
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
-           NULL)) == 0)
+           &islog)) == 0)
                return (zfs_error(hdl, EZFS_NODEVICE, msg));
-
-       if (!avail_spare && !l2cache) {
+       /*
+        * XXX - this should just go away.
+        */
+       if (!avail_spare && !l2cache && !islog) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "only inactive hot spares or cache devices "
-                   "can be removed"));
+                   "only inactive hot spares, cache, top-level, "
+                   "or log devices can be removed"));
                return (zfs_error(hdl, EZFS_NODEVICE, msg));
        }
 
+       version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
+       if (islog && version < SPA_VERSION_HOLES) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+                   "pool must be upgraded to support log removal"));
+               return (zfs_error(hdl, EZFS_BADVERSION, msg));
+       }
+
        verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
 
        if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
@@ -2055,13 +3150,16 @@ zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
  * Clear the errors for the pool, or the particular device if specified.
  */
 int
-zpool_clear(zpool_handle_t *zhp, const char *path)
+zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        nvlist_t *tgt;
+       zpool_rewind_policy_t policy;
        boolean_t avail_spare, l2cache;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
+       nvlist_t *nvi = NULL;
+       int error;
 
        if (path)
                (void) snprintf(msg, sizeof (msg),
@@ -2089,9 +3187,38 @@ zpool_clear(zpool_handle_t *zhp, const char *path)
                    &zc.zc_guid) == 0);
        }
 
-       if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
+       zpool_get_rewind_policy(rewindnvl, &policy);
+       zc.zc_cookie = policy.zrp_request;
+
+       if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
+               return (-1);
+
+       if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
+               return (-1);
+
+       while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
+           errno == ENOMEM) {
+               if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+                       zcmd_free_nvlists(&zc);
+                       return (-1);
+               }
+       }
+
+       if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
+           errno != EPERM && errno != EACCES)) {
+               if (policy.zrp_request &
+                   (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
+                       (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
+                       zpool_rewind_exclaim(hdl, zc.zc_name,
+                           ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
+                           nvi);
+                       nvlist_free(nvi);
+               }
+               zcmd_free_nvlists(&zc);
                return (0);
+       }
 
+       zcmd_free_nvlists(&zc);
        return (zpool_standard_error(hdl, errno, msg));
 }
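
A hedged sketch of a caller building the rewind-policy nvlist the new parameter expects; ZPOOL_REWIND_REQUEST and ZPOOL_NO_REWIND are assumed to come from sys/fs/zfs.h, mirroring what the zpool(8) front end would be expected to pass:

    #include <libzfs.h>

    static int
    clear_pool_errors(zpool_handle_t *zhp)
    {
            nvlist_t *policy;
            int err;

            if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0)
                    return (-1);
            /* Explicitly request "no rewind" (assumed key and constant). */
            if (nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
                ZPOOL_NO_REWIND) != 0) {
                    nvlist_free(policy);
                    return (-1);
            }
            err = zpool_clear(zhp, NULL, policy);   /* NULL path == whole pool */
            nvlist_free(policy);
            return (err);
    }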
 
@@ -2101,16 +3228,17 @@ zpool_clear(zpool_handle_t *zhp, const char *path)
 int
 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
-           guid);
+           (u_longlong_t)guid);
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_guid = guid;
+       zc.zc_cookie = ZPOOL_NO_REWIND;
 
        if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
                return (0);
@@ -2119,170 +3247,43 @@ zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
 }
 
 /*
- * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
- * hierarchy.
+ * Change the GUID for a pool.
  */
 int
-zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
-    void *data)
+zpool_reguid(zpool_handle_t *zhp)
 {
+       char msg[1024];
        libzfs_handle_t *hdl = zhp->zpool_hdl;
-       char (*paths)[MAXPATHLEN];
-       size_t size = 4;
-       int curr, fd, base, ret = 0;
-       DIR *dirp;
-       struct dirent *dp;
-       struct stat st;
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
 
-       if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
-               return (errno == ENOENT ? 0 : -1);
-
-       if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
-               int err = errno;
-               (void) close(base);
-               return (err == ENOENT ? 0 : -1);
-       }
+       (void) snprintf(msg, sizeof (msg),
+           dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
 
-       /*
-        * Oddly this wasn't a directory -- ignore that failure since we
-        * know there are no links lower in the (non-existant) hierarchy.
-        */
-       if (!S_ISDIR(st.st_mode)) {
-               (void) close(base);
+       (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+       if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
                return (0);
-       }
-
-       if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
-               (void) close(base);
-               return (-1);
-       }
-
-       (void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
-       curr = 0;
-
-       while (curr >= 0) {
-               if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
-                       goto err;
-
-               if (S_ISDIR(st.st_mode)) {
-                       if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
-                               goto err;
-
-                       if ((dirp = fdopendir(fd)) == NULL) {
-                               (void) close(fd);
-                               goto err;
-                       }
-
-                       while ((dp = readdir(dirp)) != NULL) {
-                               if (dp->d_name[0] == '.')
-                                       continue;
-
-                               if (curr + 1 == size) {
-                                       paths = zfs_realloc(hdl, paths,
-                                           size * sizeof (paths[0]),
-                                           size * 2 * sizeof (paths[0]));
-                                       if (paths == NULL) {
-                                               (void) closedir(dirp);
-                                               (void) close(fd);
-                                               goto err;
-                                       }
-
-                                       size *= 2;
-                               }
-
-                               (void) strlcpy(paths[curr + 1], paths[curr],
-                                   sizeof (paths[curr + 1]));
-                               (void) strlcat(paths[curr], "/",
-                                   sizeof (paths[curr]));
-                               (void) strlcat(paths[curr], dp->d_name,
-                                   sizeof (paths[curr]));
-                               curr++;
-                       }
-
-                       (void) closedir(dirp);
-
-               } else {
-                       if ((ret = cb(paths[curr], data)) != 0)
-                               break;
-               }
-
-               curr--;
-       }
-
-       free(paths);
-       (void) close(base);
-
-       return (ret);
-
-err:
-       free(paths);
-       (void) close(base);
-       return (-1);
-}
-
-typedef struct zvol_cb {
-       zpool_handle_t *zcb_pool;
-       boolean_t zcb_create;
-} zvol_cb_t;
-
-/*ARGSUSED*/
-static int
-do_zvol_create(zfs_handle_t *zhp, void *data)
-{
-       int ret = 0;
-
-       if (ZFS_IS_VOLUME(zhp)) {
-               (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
-               ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
-       }
 
-       if (ret == 0)
-               ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
-
-       zfs_close(zhp);
-
-       return (ret);
+       return (zpool_standard_error(hdl, errno, msg));
 }
 
 /*
- * Iterate over all zvols in the pool and make any necessary minor nodes.
+ * Reopen the pool.
  */
 int
-zpool_create_zvol_links(zpool_handle_t *zhp)
-{
-       zfs_handle_t *zfp;
-       int ret;
-
-       /*
-        * If the pool is unavailable, just return success.
-        */
-       if ((zfp = make_dataset_handle(zhp->zpool_hdl,
-           zhp->zpool_name)) == NULL)
-               return (0);
-
-       ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
-
-       zfs_close(zfp);
-       return (ret);
-}
-
-static int
-do_zvol_remove(const char *dataset, void *data)
+zpool_reopen(zpool_handle_t *zhp)
 {
-       zpool_handle_t *zhp = data;
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
+       char msg[1024];
+       libzfs_handle_t *hdl = zhp->zpool_hdl;
 
-       return (zvol_remove_link(zhp->zpool_hdl, dataset));
-}
+       (void) snprintf(msg, sizeof (msg),
+           dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
+           zhp->zpool_name);
 
-/*
- * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
- * by examining the /dev links so that a corrupted pool doesn't impede this
- * operation.
- */
-int
-zpool_remove_zvol_links(zpool_handle_t *zhp)
-{
-       return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
+       (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+       if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
+               return (0);
+       return (zpool_standard_error(hdl, errno, msg));
 }
 
 /*
@@ -2351,7 +3352,7 @@ path_to_devid(const char *path)
 static void
 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
 
        (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
@@ -2362,6 +3363,37 @@ set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
 }
 
 /*
+ * Remove partition suffix from a vdev path.  Partition suffixes may take three
+ * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
+ * case only occurs when the suffix is preceded by a digit, i.e. "md0p0".  The
+ * third case only occurs when preceded by a string matching the regular
+ * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
+ */
+static char *
+strip_partition(libzfs_handle_t *hdl, char *path)
+{
+       char *tmp = zfs_strdup(hdl, path);
+       char *part = NULL, *d = NULL;
+
+       if ((part = strstr(tmp, "-part")) && part != tmp) {
+               d = part + 5;
+       } else if ((part = strrchr(tmp, 'p')) &&
+           part > tmp + 1 && isdigit(*(part-1))) {
+               d = part + 1;
+       } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
+               for (d = &tmp[2]; isalpha(*d); part = ++d);
+       }
+       if (part && d && *d != '\0') {
+               for (; isdigit(*d); d++);
+               if (*d == '\0')
+                       *part = '\0';
+       }
+       return (tmp);
+}
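
The helper is static, so the following is a hedged standalone copy of the three suffix rules above for experimentation; the device names are illustrative:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Standalone rendition of strip_partition()'s suffix rules. */
    static void
    strip(char *tmp)
    {
            char *part = NULL, *d = NULL;

            if ((part = strstr(tmp, "-part")) && part != tmp)
                    d = part + 5;                   /* "-partX" form */
            else if ((part = strrchr(tmp, 'p')) &&
                part > tmp + 1 && isdigit(*(part - 1)))
                    d = part + 1;                   /* "pX" after a digit */
            else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd')
                    for (d = &tmp[2]; isalpha(*d); part = ++d)
                            ;                       /* bare digits on [hs]d */
            if (part && d && *d != '\0') {
                    for (; isdigit(*d); d++)
                            ;
                    if (*d == '\0')
                            *part = '\0';
            }
    }

    int
    main(void)
    {
            char a[] = "sda1";        /* -> "sda"  */
            char b[] = "md0p2";       /* -> "md0"  */
            char c[] = "disk-part3";  /* -> "disk" */

            strip(a); strip(b); strip(c);
            (void) printf("%s %s %s\n", a, b, c);
            return (0);
    }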
+
+#define        PATH_BUF_LEN    64
+
+/*
  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
  * We also check if this is a whole disk, in which case we strip off the
@@ -2377,11 +3409,13 @@ set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
  * of these checks.
  */
 char *
-zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
+zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
+    boolean_t verbose)
 {
-       char *path, *devid;
+       char *path, *devid, *type;
        uint64_t value;
-       char buf[64];
+       char buf[PATH_BUF_LEN];
+       char tmpbuf[PATH_BUF_LEN];
        vdev_stat_t *vs;
        uint_t vsc;
 
@@ -2393,14 +3427,13 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
                    (u_longlong_t)value);
                path = buf;
        } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
-
                /*
                 * If the device is dead (faulted, offline, etc) then don't
                 * bother opening it.  Otherwise we may be forcing the user to
                 * open a misbehaving device, which can have undesirable
                 * effects.
                 */
-               if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
+               if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
                    (uint64_t **)&vs, &vsc) != 0 ||
                    vs->vs_state >= VDEV_STATE_DEGRADED) &&
                    zhp != NULL &&
@@ -2432,16 +3465,21 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
                                devid_str_free(newdevid);
                }
 
-               if (strncmp(path, "/dev/dsk/", 9) == 0)
-                       path += 9;
+               /*
+                * For a block device only use the name.
+                */
+               verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+               if (strcmp(type, VDEV_TYPE_DISK) == 0) {
+                       path = strrchr(path, '/');
+                       path++;
+               }
 
+               /*
+                * Remove the partition from the path if this is a whole disk.
+                */
                if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                    &value) == 0 && value) {
-                       char *tmp = zfs_strdup(hdl, path);
-                       if (tmp == NULL)
-                               return (NULL);
-                       tmp[strlen(path) - 2] = '\0';
-                       return (tmp);
+                       return (strip_partition(hdl, path));
                }
        } else {
                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
@@ -2450,12 +3488,27 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
                 * If it's a raidz device, we need to stick in the parity level.
                 */
                if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
+
                        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
                            &value) == 0);
                        (void) snprintf(buf, sizeof (buf), "%s%llu", path,
                            (u_longlong_t)value);
                        path = buf;
                }
+
+               /*
+                * We identify each top-level vdev by using a <type-id>
+                * naming convention.
+                */
+               if (verbose) {
+                       uint64_t id;
+
+                       verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
+                           &id) == 0);
+                       (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
+                           path, (u_longlong_t)id);
+                       path = tmpbuf;
+               }
        }
 
        return (zfs_strdup(hdl, path));
@@ -2474,7 +3527,7 @@ zbookmark_compare(const void *a, const void *b)
 int
 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        uint64_t count;
        zbookmark_t *zb = NULL;
        int i;
@@ -2570,7 +3623,7 @@ nomem:
 int
 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) strcpy(zc.zc_name, zhp->zpool_name);
@@ -2632,7 +3685,7 @@ zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
 static int
 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        libzfs_handle_t *hdl = zhp->zpool_hdl;
 
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
@@ -2674,7 +3727,7 @@ get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
  * into 'records'.  'leftover' is set to the number of bytes that weren't
  * processed as there wasn't a complete record.
  */
-static int
+int
 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
     nvlist_t ***records, uint_t *numrecords)
 {
@@ -2755,18 +3808,106 @@ zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
        return (err);
 }
 
+/*
+ * Retrieve the next event.  If there is a new event available 'nvp' will
+ * contain a newly allocated nvlist and 'dropped' will be set to the number
+ * of missed events since the last call to this function.  When 'nvp' is
+ * set to NULL it indicates no new events are available.  In either case
+ * the function returns 0 and it is up to the caller to free 'nvp'.  In
+ * the case of a fatal error the function will return a non-zero value.
+ * When the function is called in blocking mode it will not return until
+ * a new event is available.
+ */
+int
+zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
+    int *dropped, int block, int cleanup_fd)
+{
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
+       int error = 0;
+
+       *nvp = NULL;
+       *dropped = 0;
+       zc.zc_cleanup_fd = cleanup_fd;
+
+       if (!block)
+               zc.zc_guid = ZEVENT_NONBLOCK;
+
+       if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
+               return (-1);
+
+retry:
+       if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
+               switch (errno) {
+               case ESHUTDOWN:
+                       error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
+                           dgettext(TEXT_DOMAIN, "zfs shutdown"));
+                       goto out;
+               case ENOENT:
+                       /* Blocking error case should not occur */
+                       if (block)
+                               error = zpool_standard_error_fmt(hdl, errno,
+                                   dgettext(TEXT_DOMAIN, "cannot get event"));
+
+                       goto out;
+               case ENOMEM:
+                       if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+                               error = zfs_error_fmt(hdl, EZFS_NOMEM,
+                                   dgettext(TEXT_DOMAIN, "cannot get event"));
+                               goto out;
+                       } else {
+                               goto retry;
+                       }
+               default:
+                       error = zpool_standard_error_fmt(hdl, errno,
+                           dgettext(TEXT_DOMAIN, "cannot get event"));
+                       goto out;
+               }
+       }
+
+       error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
+       if (error != 0)
+               goto out;
+
+       *dropped = (int)zc.zc_cookie;
+out:
+       zcmd_free_nvlists(&zc);
+
+       return (error);
+}
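
A hedged consumer sketch: poll the queue without blocking until it is drained. The "/dev/zfs" cleanup descriptor is an assumption about how callers are expected to supply cleanup_fd:

    #include <libzfs.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static void
    drain_events(libzfs_handle_t *hdl)
    {
            int cleanup_fd, dropped;
            nvlist_t *nvp;

            if ((cleanup_fd = open("/dev/zfs", O_RDWR)) < 0)
                    return;

            for (;;) {
                    /* block == 0: return at once when the queue is empty */
                    if (zpool_events_next(hdl, &nvp, &dropped, 0,
                        cleanup_fd) != 0)
                            break;          /* fatal error */
                    if (nvp == NULL)
                            break;          /* no more events */
                    if (dropped > 0)
                            (void) printf("missed %d events\n", dropped);
                    nvlist_print(stdout, nvp);
                    nvlist_free(nvp);       /* caller frees each event */
            }
            (void) close(cleanup_fd);
    }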
+
+/*
+ * Clear all events.
+ */
+int
+zpool_events_clear(libzfs_handle_t *hdl, int *count)
+{
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
+       char msg[1024];
+
+       (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+           "cannot clear events"));
+
+       if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
+               return (zpool_standard_error_fmt(hdl, errno, msg));
+
+       if (count != NULL)
+               *count = (int)zc.zc_cookie; /* # of events cleared */
+
+       return (0);
+}
+
 void
 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
     char *pathname, size_t len)
 {
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        boolean_t mounted = B_FALSE;
        char *mntpnt = NULL;
        char dsname[MAXNAMELEN];
 
        if (dsobj == 0) {
                /* special case for the MOS */
-               (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
+               (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
+                   (longlong_t)obj);
                return;
        }
 
@@ -2777,7 +3918,7 @@ zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
            ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
                /* just write out a path of two object numbers */
                (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
-                   dsobj, obj);
+                   (longlong_t)dsobj, (longlong_t)obj);
                return;
        }
        (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
@@ -2798,19 +3939,11 @@ zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
                            dsname, zc.zc_value);
                }
        } else {
-               (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
+               (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
+                   (longlong_t)obj);
        }
        free(mntpnt);
 }
 
-#define        RDISK_ROOT      "/dev/rdsk"
-#define        BACKUP_SLICE    "s2"
-/*
- * Don't start the slice at the default block of 34; many storage
- * devices will use a stripe width of 128k, so start there instead.
- */
-#define        NEW_START_BLOCK 256
-
 /*
  * Read the EFI label from the config, if a label does not exist then
  * pass back the error to the caller. If the caller has passed a non-NULL
@@ -2828,9 +3961,9 @@ read_efi_label(nvlist_t *config, diskaddr_t *sb)
        if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
                return (err);
 
-       (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
+       (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
            strrchr(path, '/'));
-       if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
+       if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
                struct dk_gpt *vtoc;
 
                if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
@@ -2876,6 +4009,54 @@ find_start_block(nvlist_t *config)
        return (MAXOFFSET_T);
 }
 
+int
+zpool_label_disk_wait(char *path, int timeout)
+{
+       struct stat64 statbuf;
+       int i;
+
+       /*
+        * Wait timeout milliseconds for a newly created device to be available
+        * from the given path.  There is a small window when a /dev/ device
+        * will exist and the udev link will not, so we must wait for the
+        * symlink.  Depending on the udev rules this may take a few seconds.
+        */
+       for (i = 0; i < timeout; i++) {
+               usleep(1000);
+
+               errno = 0;
+               if ((stat64(path, &statbuf) == 0) && (errno == 0))
+                       return (0);
+       }
+
+       return (ENOENT);
+}
+
+int
+zpool_label_disk_check(char *path)
+{
+       struct dk_gpt *vtoc;
+       int fd, err;
+
+       if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
+               return (errno);
+
+       if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
+               (void) close(fd);
+               return (err);
+       }
+
+       if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
+               efi_free(vtoc);
+               (void) close(fd);
+               return (EIDRM);
+       }
+
+       efi_free(vtoc);
+       (void) close(fd);
+       return (0);
+}
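
A hedged sketch chaining the two helpers after labeling: wait for udev to publish the partition link, then verify the GPT label landed intact. The 3000 ms timeout is an illustrative assumption:

    #include <libzfs.h>
    #include <stdio.h>

    static int
    wait_and_check(char *path)
    {
            int err;

            /* Wait up to ~3 seconds for the udev symlink to appear. */
            if ((err = zpool_label_disk_wait(path, 3000)) != 0) {
                    (void) fprintf(stderr, "device missing: %d\n", err);
                    return (err);
            }
            /* Fails with EIDRM if the primary GPT is corrupt. */
            if ((err = zpool_label_disk_check(path)) != 0) {
                    (void) fprintf(stderr, "bad EFI label: %d\n", err);
                    return (err);
            }
            return (0);
    }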
+
 /*
  * Label an individual disk.  The name provided is the short name,
  * stripped of any leading /dev path.
@@ -2885,7 +4066,7 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
 {
        char path[MAXPATHLEN];
        struct dk_gpt *vtoc;
-       int fd;
+       int rval, fd;
        size_t resv = EFI_MIN_RESV_SIZE;
        uint64_t slice_size;
        diskaddr_t start_block;
@@ -2898,12 +4079,14 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
        if (zhp) {
                nvlist_t *nvroot;
 
-               if (pool_is_bootable(zhp)) {
+#if defined(__sun__) || defined(__sun)
+               if (zpool_is_bootable(zhp)) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "EFI labeled devices are not supported on root "
                            "pools."));
                        return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
                }
+#endif
 
                verify(nvlist_lookup_nvlist(zhp->zpool_config,
                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
@@ -2918,16 +4101,15 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
                start_block = NEW_START_BLOCK;
        }
 
-       (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
-           BACKUP_SLICE);
+       (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
 
-       if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
+       if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
                /*
                 * This shouldn't happen.  We've long since verified that this
                 * is a valid device.
                 */
-               zfs_error_aux(hdl,
-                   dgettext(TEXT_DOMAIN, "unable to open device"));
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
+                   "label '%s': unable to open device: %d"), path, errno);
                return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
        }
 
@@ -2940,8 +4122,8 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
                        (void) no_memory(hdl);
 
                (void) close(fd);
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "unable to read disk capacity"), name);
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
+                   "label '%s': unable to read disk capacity"), path);
 
                return (zfs_error(hdl, EZFS_NOCAP, errbuf));
        }
@@ -2951,6 +4133,7 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
        if (start_block == MAXOFFSET_T)
                start_block = NEW_START_BLOCK;
        slice_size -= start_block;
+       slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
 
        vtoc->efi_parts[0].p_start = start_block;
        vtoc->efi_parts[0].p_size = slice_size;
@@ -2970,7 +4153,7 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
        vtoc->efi_parts[8].p_size = resv;
        vtoc->efi_parts[8].p_tag = V_RESERVED;
 
-       if (efi_write(fd, vtoc) != 0) {
+       if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
                /*
                 * Some block drivers (like pcata) may not support EFI
                 * GPT labels.  Print out a helpful error message dir-
@@ -2980,122 +4163,36 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
                (void) close(fd);
                efi_free(vtoc);
 
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "try using fdisk(1M) and then provide a specific slice"));
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
+                   "parted(8) and then provide a specific slice: %d"), rval);
                return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
        }
 
        (void) close(fd);
        efi_free(vtoc);
-       return (0);
-}
-
-static boolean_t
-supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
-{
-       char *type;
-       nvlist_t **child;
-       uint_t children, c;
-
-       verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
-       if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
-           strcmp(type, VDEV_TYPE_FILE) == 0 ||
-           strcmp(type, VDEV_TYPE_LOG) == 0 ||
-           strcmp(type, VDEV_TYPE_MISSING) == 0) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "vdev type '%s' is not supported"), type);
-               (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
-               return (B_FALSE);
-       }
-       if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
-           &child, &children) == 0) {
-               for (c = 0; c < children; c++) {
-                       if (!supported_dump_vdev_type(hdl, child[c], errbuf))
-                               return (B_FALSE);
-               }
-       }
-       return (B_TRUE);
-}
-
-/*
- * check if this zvol is allowable for use as a dump device; zero if
- * it is, > 0 if it isn't, < 0 if it isn't a zvol
- */
-int
-zvol_check_dump_config(char *arg)
-{
-       zpool_handle_t *zhp = NULL;
-       nvlist_t *config, *nvroot;
-       char *p, *volname;
-       nvlist_t **top;
-       uint_t toplevels;
-       libzfs_handle_t *hdl;
-       char errbuf[1024];
-       char poolname[ZPOOL_MAXNAMELEN];
-       int pathlen = strlen(ZVOL_FULL_DEV_DIR);
-       int ret = 1;
-
-       if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
-               return (-1);
-       }
-
-       (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
-           "dump is not supported on device '%s'"), arg);
-
-       if ((hdl = libzfs_init()) == NULL)
-               return (1);
-       libzfs_print_on_error(hdl, B_TRUE);
 
-       volname = arg + pathlen;
-
-       /* check the configuration of the pool */
-       if ((p = strchr(volname, '/')) == NULL) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "malformed dataset name"));
-               (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
-               return (1);
-       } else if (p - volname >= ZFS_MAXNAMELEN) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "dataset name is too long"));
-               (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
-               return (1);
-       } else {
-               (void) strncpy(poolname, volname, p - volname);
-               poolname[p - volname] = '\0';
-       }
+       /*
+        * Wait for the first expected partition to appear.  The partition
+        * device node is created asynchronously (by udev on Linux), so it
+        * may not exist immediately after the label is written.
+        */
 
-       if ((zhp = zpool_open(hdl, poolname)) == NULL) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "could not open pool '%s'"), poolname);
-               (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
-               goto out;
-       }
-       config = zpool_get_config(zhp, NULL);
-       if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
-           &nvroot) != 0) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "could not obtain vdev configuration for  '%s'"), poolname);
-               (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
-               goto out;
-       }
+       (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
+       (void) zfs_append_partition(path, MAXPATHLEN);
 
-       verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
-           &top, &toplevels) == 0);
-       if (toplevels != 1) {
-               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
-                   "'%s' has multiple top level vdevs"), poolname);
-               (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
-               goto out;
+       rval = zpool_label_disk_wait(path, 3000);
+       if (rval) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
+                   "detect device partitions on '%s': %d"), path, rval);
+               return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
        }
 
-       if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
-               goto out;
+       /* We can't be too paranoid.  Read the label back and verify it. */
+       (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
+       rval = zpool_label_disk_check(path);
+       if (rval) {
+               zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
+                   "EFI label on '%s' is damaged.  Ensure\nthis device "
+                   "is not in in use, and is functioning properly: %d"),
+                   path, rval);
+               return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
        }
-       ret = 0;
 
-out:
-       if (zhp)
-               zpool_close(zhp);
-       libzfs_fini(hdl);
-       return (ret);
+       return (0);
 }
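+
+/*
+ * Usage sketch (illustrative only; the handle, pool, and short device
+ * name "sdb" are assumptions, not part of this change):
+ *
+ *     if (zpool_label_disk(hdl, zhp, "sdb") != 0)
+ *             return (-1);
+ *
+ * On success the disk carries a fresh EFI label, the first partition
+ * node has appeared under /dev, and the label has been read back and
+ * verified by zpool_label_disk_wait() and zpool_label_disk_check().
+ */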