Re-add zpool_clear_label() from OpenSolaris
diff --git a/lib/libzfs/libzfs_import.c b/lib/libzfs/libzfs_import.c
index d677768..58b6043 100644
  * CDDL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
-#pragma ident  "%Z%%M% %I%     %E% SMI"
-
 /*
  * Pool import support functions.
  *
  * using our derived config, and record the results.
  */
 
+#include <ctype.h>
 #include <devid.h>
 #include <dirent.h>
 #include <errno.h>
 #include <libintl.h>
+#include <stddef.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include <sys/vtoc.h>
+#include <sys/dktp/fdisk.h>
+#include <sys/efi_partition.h>
 
 #include <sys/vdev_impl.h>
+#ifdef HAVE_LIBBLKID
+#include <blkid/blkid.h>
+#endif
 
 #include "libzfs.h"
 #include "libzfs_impl.h"
@@ -80,6 +87,7 @@ typedef struct pool_entry {
 typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
+       uint64_t                ne_order;
        struct name_entry       *ne_next;
 } name_entry_t;
 
@@ -125,7 +133,6 @@ fix_paths(nvlist_t *nv, name_entry_t *names)
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
-       int matched;
 
        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
@@ -141,44 +148,33 @@ fix_paths(nvlist_t *nv, name_entry_t *names)
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
-        * which case we have overlapping slices or multiple paths to the same
-        * disk.  If this is the case, then we want to pick the path that is
-        * the most similar to the original, where "most similar" is the number
-        * of matching characters starting from the end of the path.  This will
-        * preserve slice numbers even if the disks have been reorganized, and
-        * will also catch preferred disk names if multiple paths exist.
+        * which case we have overlapping partitions or multiple paths to the
+        * same disk.  In this case we prefer to use the path name which
+        * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
+        * use the lowest order device which corresponds to the first match
+        * while traversing the ZPOOL_IMPORT_PATH search path.
         */
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;
 
-       matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
-                       const char *src, *dst;
-                       int count;
 
                        if (path == NULL) {
                                best = ne;
                                break;
                        }
 
-                       src = ne->ne_name + strlen(ne->ne_name) - 1;
-                       dst = path + strlen(path) - 1;
-                       for (count = 0; src >= ne->ne_name && dst >= path;
-                           src--, dst--, count++)
-                               if (*src != *dst)
-                                       break;
-
-                       /*
-                        * At this point, 'count' is the number of characters
-                        * matched from the end.
-                        */
-                       if (count > matched || best == NULL) {
+                       if ((strlen(path) == strlen(ne->ne_name)) &&
+                           !strncmp(path, ne->ne_name, strlen(path))) {
                                best = ne;
-                               matched = count;
+                               break;
                        }
+
+                       if (best == NULL || ne->ne_order < best->ne_order)
+                               best = ne;
                }
        }
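
The selection policy above reduces to: an exact match against the path recorded in the label wins; otherwise the earliest hit on the import search path (lowest ne_order) wins. A minimal standalone sketch of that rule, outside the nvlist plumbing (struct and names hypothetical):

    #include <string.h>
    #include <stddef.h>

    struct entry {
            const char      *name;  /* discovered device path */
            int             order;  /* position on the search path */
    };

    /* Prefer an exact match against the recorded path; fall back to
     * the lowest-order (first-discovered) entry. */
    static const struct entry *
    pick_best(const struct entry *e, int n, const char *path)
    {
            const struct entry *best = NULL;
            int i;

            for (i = 0; i < n; i++) {
                    if (path != NULL && strcmp(path, e[i].name) == 0)
                            return (&e[i]);
                    if (best == NULL || e[i].order < best->order)
                            best = &e[i];
            }
            return (best);
    }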
 
@@ -204,7 +200,7 @@ fix_paths(nvlist_t *nv, name_entry_t *names)
  */
 static int
 add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
-    nvlist_t *config)
+    int order, nvlist_t *config)
 {
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
@@ -229,6 +225,7 @@ add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
+               ne->ne_order = order;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
@@ -330,6 +327,7 @@ add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
        }
 
        ne->ne_guid = vdev_guid;
+       ne->ne_order = order;
        ne->ne_next = pl->names;
        pl->names = ne;
 
@@ -367,7 +365,7 @@ static nvlist_t *
 refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
 {
        nvlist_t *nvl;
-       zfs_cmd_t zc = { 0 };
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int err;
 
        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
@@ -388,8 +386,6 @@ refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
        }
 
        if (err) {
-               (void) zpool_standard_error(hdl, errno,
-                   dgettext(TEXT_DOMAIN, "cannot discover pools"));
                zcmd_free_nvlists(&zc);
                return (NULL);
        }
@@ -404,6 +400,23 @@ refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
 }
 
 /*
+ * Determine if the vdev id is a hole in the namespace.
+ */
+boolean_t
+vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
+{
+       int c;
+
+       for (c = 0; c < holes; c++) {
+
+               /* Top-level is a hole */
+               if (hole_array[c] == id)
+                       return (B_TRUE);
+       }
+       return (B_FALSE);
+}
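
vdev_is_hole() is a simple membership test against the hole array. A usage sketch with hypothetical values (two removed top-levels out of five; assumes <stdio.h>):

    uint64_t hole_array[] = { 1, 3 };   /* ids from ZPOOL_CONFIG_HOLE_ARRAY */
    uint_t holes = 2;
    uint_t id;

    for (id = 0; id < 5; id++) {
            if (vdev_is_hole(hole_array, holes, id))
                    (void) printf("top-level %u is a hole\n", id);
    }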
+
+/*
  * Convert our list of pools into the definitive set of configurations.  We
  * start by picking the best config for each toplevel vdev.  Once that's done,
  * we assemble the toplevel vdevs into a full config for the pool.  We make a
@@ -416,26 +429,29 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
-       nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
+       nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
-       char *name, *hostname;
-       uint64_t version, guid;
+       char *name, *hostname = NULL;
+       uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
+       uint_t holes;
+       uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
+       boolean_t valid_top_config = B_FALSE;
 
        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;
 
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
-               uint64_t id;
+               uint64_t id, max_txg = 0;
 
                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
@@ -463,53 +479,90 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
                                }
                        }
 
+                       /*
+                        * We rely on the fact that the max txg for the
+                        * pool will contain the most up-to-date information
+                        * about the valid top-levels in the vdev namespace.
+                        */
+                       if (best_txg > max_txg) {
+                               (void) nvlist_remove(config,
+                                   ZPOOL_CONFIG_VDEV_CHILDREN,
+                                   DATA_TYPE_UINT64);
+                               (void) nvlist_remove(config,
+                                   ZPOOL_CONFIG_HOLE_ARRAY,
+                                   DATA_TYPE_UINT64_ARRAY);
+
+                               max_txg = best_txg;
+                               hole_array = NULL;
+                               holes = 0;
+                               max_id = 0;
+                               valid_top_config = B_FALSE;
+
+                               if (nvlist_lookup_uint64(tmp,
+                                   ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
+                                       verify(nvlist_add_uint64(config,
+                                           ZPOOL_CONFIG_VDEV_CHILDREN,
+                                           max_id) == 0);
+                                       valid_top_config = B_TRUE;
+                               }
+
+                               if (nvlist_lookup_uint64_array(tmp,
+                                   ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
+                                   &holes) == 0) {
+                                       verify(nvlist_add_uint64_array(config,
+                                           ZPOOL_CONFIG_HOLE_ARRAY,
+                                           hole_array, holes) == 0);
+                               }
+                       }
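
Concretely, for a hypothetical pool created with five top-level vdevs whose id-2 device was later removed, the label with the highest txg carries:

    uint64_t max_id = 5;            /* ZPOOL_CONFIG_VDEV_CHILDREN: total slots */
    uint64_t hole_array[] = { 2 };  /* ZPOOL_CONFIG_HOLE_ARRAY: removed ids */
    uint_t holes = 1;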
+
                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
-                                *      pool guid
-                                *      name
-                                *      pool state
+                                *      pool guid
+                                *      name
+                                *      comment (if available)
+                                *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
-                               uint64_t state;
+                               uint64_t state, version;
+                               char *comment = NULL;
+
+                               version = fnvlist_lookup_uint64(tmp,
+                                   ZPOOL_CONFIG_VERSION);
+                               fnvlist_add_uint64(config,
+                                   ZPOOL_CONFIG_VERSION, version);
+                               guid = fnvlist_lookup_uint64(tmp,
+                                   ZPOOL_CONFIG_POOL_GUID);
+                               fnvlist_add_uint64(config,
+                                   ZPOOL_CONFIG_POOL_GUID, guid);
+                               name = fnvlist_lookup_string(tmp,
+                                   ZPOOL_CONFIG_POOL_NAME);
+                               fnvlist_add_string(config,
+                                   ZPOOL_CONFIG_POOL_NAME, name);
+
+                               if (nvlist_lookup_string(tmp,
+                                   ZPOOL_CONFIG_COMMENT, &comment) == 0)
+                                       fnvlist_add_string(config,
+                                           ZPOOL_CONFIG_COMMENT, comment);
+
+                               state = fnvlist_lookup_uint64(tmp,
+                                   ZPOOL_CONFIG_POOL_STATE);
+                               fnvlist_add_uint64(config,
+                                   ZPOOL_CONFIG_POOL_STATE, state);
 
-                               verify(nvlist_lookup_uint64(tmp,
-                                   ZPOOL_CONFIG_VERSION, &version) == 0);
-                               if (nvlist_add_uint64(config,
-                                   ZPOOL_CONFIG_VERSION, version) != 0)
-                                       goto nomem;
-                               verify(nvlist_lookup_uint64(tmp,
-                                   ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
-                               if (nvlist_add_uint64(config,
-                                   ZPOOL_CONFIG_POOL_GUID, guid) != 0)
-                                       goto nomem;
-                               verify(nvlist_lookup_string(tmp,
-                                   ZPOOL_CONFIG_POOL_NAME, &name) == 0);
-                               if (nvlist_add_string(config,
-                                   ZPOOL_CONFIG_POOL_NAME, name) != 0)
-                                       goto nomem;
-                               verify(nvlist_lookup_uint64(tmp,
-                                   ZPOOL_CONFIG_POOL_STATE, &state) == 0);
-                               if (nvlist_add_uint64(config,
-                                   ZPOOL_CONFIG_POOL_STATE, state) != 0)
-                                       goto nomem;
                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
-                                       if (nvlist_add_uint64(config,
-                                           ZPOOL_CONFIG_HOSTID, hostid) != 0)
-                                               goto nomem;
-                                       verify(nvlist_lookup_string(tmp,
-                                           ZPOOL_CONFIG_HOSTNAME,
-                                           &hostname) == 0);
-                                       if (nvlist_add_string(config,
-                                           ZPOOL_CONFIG_HOSTNAME,
-                                           hostname) != 0)
-                                               goto nomem;
+                                       fnvlist_add_uint64(config,
+                                           ZPOOL_CONFIG_HOSTID, hostid);
+                                       hostname = fnvlist_lookup_string(tmp,
+                                           ZPOOL_CONFIG_HOSTNAME);
+                                       fnvlist_add_string(config,
+                                           ZPOOL_CONFIG_HOSTNAME, hostname);
                                }
 
                                config_seen = B_TRUE;
@@ -522,6 +575,7 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);
+
                        if (id >= children) {
                                nvlist_t **newchild;
 
@@ -542,17 +596,82 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
 
                }
 
+               /*
+                * If we have information about all the top-levels then
+                * clean up the nvlist which we've constructed. This
+                * means removing any extraneous devices that are
+                * beyond the valid range or adding devices to the end
+                * of our array which appear to be missing.
+                */
+               if (valid_top_config) {
+                       if (max_id < children) {
+                               for (c = max_id; c < children; c++)
+                                       nvlist_free(child[c]);
+                               children = max_id;
+                       } else if (max_id > children) {
+                               nvlist_t **newchild;
+
+                               newchild = zfs_alloc(hdl, (max_id) *
+                                   sizeof (nvlist_t *));
+                               if (newchild == NULL)
+                                       goto nomem;
+
+                               for (c = 0; c < children; c++)
+                                       newchild[c] = child[c];
+
+                               free(child);
+                               child = newchild;
+                               children = max_id;
+                       }
+               }
+
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
 
                /*
+                * The vdev namespace may contain holes as a result of
+                * device removal. We must add them back into the vdev
+                * tree before we process any missing devices.
+                */
+               if (holes > 0) {
+                       ASSERT(valid_top_config);
+
+                       for (c = 0; c < children; c++) {
+                               nvlist_t *holey;
+
+                               if (child[c] != NULL ||
+                                   !vdev_is_hole(hole_array, holes, c))
+                                       continue;
+
+                               if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
+                                   0) != 0)
+                                       goto nomem;
+
+                               /*
+                                * Holes in the namespace are treated as
+                                * "hole" top-level vdevs and have a
+                                * special flag set on them.
+                                */
+                               if (nvlist_add_string(holey,
+                                   ZPOOL_CONFIG_TYPE,
+                                   VDEV_TYPE_HOLE) != 0 ||
+                                   nvlist_add_uint64(holey,
+                                   ZPOOL_CONFIG_ID, c) != 0 ||
+                                   nvlist_add_uint64(holey,
+                                   ZPOOL_CONFIG_GUID, 0ULL) != 0)
+                                       goto nomem;
+                               child[c] = holey;
+                       }
+               }
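
Continuing the hypothetical five-slot example, after this loop the removed slot holds a placeholder so every id still matches its array index:

    /*
     * child[0], child[1], child[3], child[4]: real top-level vdevs
     * child[2]: { type = VDEV_TYPE_HOLE, id = 2, guid = 0 }
     */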
+
+               /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
-               for (c = 0; c < children; c++)
+               for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
@@ -570,6 +689,7 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
                                }
                                child[c] = missing;
                        }
+               }
 
                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
@@ -636,8 +756,11 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
                        continue;
                }
 
-               if ((nvl = refresh_config(hdl, config)) == NULL)
-                       goto error;
+               if ((nvl = refresh_config(hdl, config)) == NULL) {
+                       nvlist_free(config);
+                       config = NULL;
+                       continue;
+               }
 
                nvlist_free(config);
                config = nvl;
@@ -778,6 +901,118 @@ zpool_read_label(int fd, nvlist_t **config)
 }
 
 /*
+ * Given a file descriptor, clear (zero) the label information.  This function
+ * is currently only used in the appliance stack as part of the ZFS sysevent
+ * module.
+ */
+int
+zpool_clear_label(int fd)
+{
+       struct stat64 statbuf;
+       int l;
+       vdev_label_t *label;
+       uint64_t size;
+
+       if (fstat64_blk(fd, &statbuf) == -1)
+               return (0);
+       size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
+
+       if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
+               return (-1);
+
+       for (l = 0; l < VDEV_LABELS; l++) {
+               if (pwrite64(fd, label, sizeof (vdev_label_t),
+                   label_offset(size, l)) != sizeof (vdev_label_t)) {
+                       free(label);
+                       return (-1);
+               }
+       }
+
+       free(label);
+       return (0);
+}
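
A minimal caller sketch (device path hypothetical; assumes <fcntl.h>, <stdio.h>, and <unistd.h>). The descriptor must be writable, since all VDEV_LABELS label regions are zeroed in place:

    int fd;

    if ((fd = open("/dev/sdb1", O_RDWR)) >= 0) {
            if (zpool_clear_label(fd) != 0)
                    (void) fprintf(stderr, "failed to clear labels\n");
            (void) close(fd);
    }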
+
+#ifdef HAVE_LIBBLKID
+/*
+ * Use libblkid to quickly search for zfs devices
+ */
+static int
+zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
+{
+       blkid_cache cache;
+       blkid_dev_iterate iter;
+       blkid_dev dev;
+       const char *devname;
+       nvlist_t *config;
+       int fd, err;
+
+       err = blkid_get_cache(&cache, NULL);
+       if (err != 0) {
+               (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
+                   dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
+               goto err_blkid1;
+       }
+
+       err = blkid_probe_all(cache);
+       if (err != 0) {
+               (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
+                   dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
+               goto err_blkid2;
+       }
+
+       iter = blkid_dev_iterate_begin(cache);
+       if (iter == NULL) {
+               (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
+                   dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
+               goto err_blkid2;
+       }
+
+       err = blkid_dev_set_search(iter, "TYPE", "zfs");
+       if (err != 0) {
+               (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
+                   dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
+               goto err_blkid3;
+       }
+
+       while (blkid_dev_next(iter, &dev) == 0) {
+               devname = blkid_dev_devname(dev);
+               if ((fd = open64(devname, O_RDONLY)) < 0)
+                       continue;
+
+               err = zpool_read_label(fd, &config);
+               (void) close(fd);
+
+               if (err != 0) {
+                       (void) no_memory(hdl);
+                       goto err_blkid3;
+               }
+
+               if (config != NULL) {
+                       err = add_config(hdl, pools, devname, 0, config);
+                       if (err != 0)
+                               goto err_blkid3;
+               }
+       }
+
+err_blkid3:
+       blkid_dev_iterate_end(iter);
+err_blkid2:
+       blkid_put_cache(cache);
+err_blkid1:
+       return (err);
+}
+#endif /* HAVE_LIBBLKID */
+
+char *
+zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
+       "/dev/disk/by-vdev",    /* Custom rules, use first if they exist */
+       "/dev/mapper",          /* Use multipath devices before components */
+       "/dev/disk/by-uuid",    /* Single unique entry and persistent */
+       "/dev/disk/by-id",      /* May be multiple entries and persistent */
+       "/dev/disk/by-path",    /* Encodes physical location and persistent */
+       "/dev/disk/by-label",   /* Custom persistent labels */
+       "/dev"                  /* UNSAFE device names will change */
+};
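
Each directory's index feeds add_config() as i + 1, while blkid hits pass 0, so this array ordering is exactly the ne_order precedence consumed by fix_paths() above:

    /*
     * blkid match               -> order 0 (most preferred)
     * /dev/disk/by-vdev (i = 0) -> order 1
     * ...
     * /dev (i = 6)              -> order 7 (least preferred)
     */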
+
+/*
  * Given a list of directories to search, find all pools stored on disk.  This
  * includes partial pools which are not available to import.  If no args are
  * given (argc is 0), then the default directory (/dev/dsk) is searched.
@@ -785,18 +1020,16 @@ zpool_read_label(int fd, nvlist_t **config)
  * to import a specific pool.
  */
 static nvlist_t *
-zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
-    boolean_t active_ok, char *poolname, uint64_t guid)
+zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
 {
-       int i;
+       int i, dirs = iarg->paths;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
-       char *end;
+       char *end, **dir = iarg->path;
        size_t pathleft;
        struct stat64 statbuf;
        nvlist_t *ret = NULL, *config;
-       static char *default_dir = "/dev/dsk";
        int fd;
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
@@ -804,11 +1037,21 @@ zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;
 
-       verify(poolname == NULL || guid == 0);
+       verify(iarg->poolname == NULL || iarg->guid == 0);
 
-       if (argc == 0) {
-               argc = 1;
-               argv = &default_dir;
+       if (dirs == 0) {
+#ifdef HAVE_LIBBLKID
+               /* Use libblkid to scan all devices for their type */
+               if (zpool_find_import_blkid(hdl, &pools) == 0)
+                       goto skip_scanning;
+
+               (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
+                   dgettext(TEXT_DOMAIN, "blkid failure falling back "
+                   "to manual probing"));
+#endif /* HAVE_LIBBLKID */
+
+               dir = zpool_default_import_path;
+               dirs = DEFAULT_IMPORT_PATH_SIZE;
        }
 
        /*
@@ -816,15 +1059,20 @@ zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
-       for (i = 0; i < argc; i++) {
+       for (i = 0; i < dirs; i++) {
                char *rdsk;
                int dfd;
 
                /* use realpath to normalize the path */
-               if (realpath(argv[i], path) == 0) {
+               if (realpath(dir[i], path) == 0) {
+
+                       /* it is safe to skip missing search paths */
+                       if (errno == ENOENT)
+                               continue;
+
+                       zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
-                           dgettext(TEXT_DOMAIN, "cannot open '%s'"),
-                           argv[i]);
+                           dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
@@ -860,20 +1108,45 @@ zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;
 
-                       if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
+                       /*
+                        * Skip checking devices with well-known prefixes:
+                        * watchdog - A special close is required to avoid
+                        *            triggering it and resetting the system.
+                        * fuse     - Fuse control device.
+                        * ppp      - Generic PPP driver.
+                        * tty*     - Generic serial interface.
+                        * vcs*     - Virtual console memory.
+                        * parport* - Parallel port interface.
+                        * lp*      - Printer interface.
+                        * fd*      - Floppy interface.
+                        * hpet     - High Precision Event Timer, crashes qemu
+                        *            when accessed from a virtual machine.
+                        * core     - Symlink to /proc/kcore, causes a crash
+                        *            when accessed from Xen dom0.
+                        */
+                       if ((strncmp(name, "watchdog", 8) == 0) ||
+                           (strncmp(name, "fuse", 4) == 0)     ||
+                           (strncmp(name, "ppp", 3) == 0)      ||
+                           (strncmp(name, "tty", 3) == 0)      ||
+                           (strncmp(name, "vcs", 3) == 0)      ||
+                           (strncmp(name, "parport", 7) == 0)  ||
+                           (strncmp(name, "lp", 2) == 0)       ||
+                           (strncmp(name, "fd", 2) == 0)       ||
+                           (strncmp(name, "hpet", 4) == 0)     ||
+                           (strncmp(name, "core", 4) == 0))
                                continue;
 
                        /*
                         * Ignore failed stats.  We only want regular
-                        * files, character devs and block devs.
+                        * files and block devices.
                         */
-                       if (fstat64(fd, &statbuf) != 0 ||
+                       if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
                            (!S_ISREG(statbuf.st_mode) &&
-                           !S_ISCHR(statbuf.st_mode) &&
-                           !S_ISBLK(statbuf.st_mode))) {
-                               (void) close(fd);
+                           !S_ISBLK(statbuf.st_mode)))
+                               continue;
+
+                       if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
                                continue;
-                       }
 
                        if ((zpool_read_label(fd, &config)) != 0) {
                                (void) close(fd);
@@ -885,21 +1158,22 @@ zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
 
                        if (config != NULL) {
                                boolean_t matched = B_TRUE;
+                               char *pname;
+
+                               if ((iarg->poolname != NULL) &&
+                                   (nvlist_lookup_string(config,
+                                   ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {
 
-                               if (poolname != NULL) {
-                                       char *pname;
+                                       if (strcmp(iarg->poolname, pname))
+                                               matched = B_FALSE;
 
-                                       matched = nvlist_lookup_string(config,
-                                           ZPOOL_CONFIG_POOL_NAME,
-                                           &pname) == 0 &&
-                                           strcmp(poolname, pname) == 0;
-                               } else if (guid != 0) {
+                               } else if (iarg->guid != 0) {
                                        uint64_t this_guid;
 
                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
-                                           guid == this_guid;
+                                           iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
@@ -908,7 +1182,7 @@ zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
                                }
                                /* use the non-raw path for the config */
                                (void) strlcpy(end, name, pathleft);
-                               if (add_config(hdl, &pools, path, config) != 0)
+                               if (add_config(hdl, &pools, path, i+1, config))
                                        goto error;
                        }
                }
@@ -917,7 +1191,10 @@ zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
                dirp = NULL;
        }
 
-       ret = get_configs(hdl, &pools, active_ok);
+#ifdef HAVE_LIBBLKID
+skip_scanning:
+#endif
+       ret = get_configs(hdl, &pools, iarg->can_be_active);
 
 error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
@@ -951,27 +1228,12 @@ error:
 nvlist_t *
 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
 {
-       return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0));
-}
-
-nvlist_t *
-zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv,
-    char *pool)
-{
-       return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0));
-}
+       importargs_t iarg = { 0 };
 
-nvlist_t *
-zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv,
-    uint64_t guid)
-{
-       return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid));
-}
+       iarg.paths = argc;
+       iarg.path = argv;
 
-nvlist_t *
-zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv)
-{
-       return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0));
+       return (zpool_find_import_impl(hdl, &iarg));
 }
 
 /*
@@ -1093,6 +1355,46 @@ zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
        return (pools);
 }
 
+static int
+name_or_guid_exists(zpool_handle_t *zhp, void *data)
+{
+       importargs_t *import = data;
+       int found = 0;
+
+       if (import->poolname != NULL) {
+               char *pool_name;
+
+               verify(nvlist_lookup_string(zhp->zpool_config,
+                   ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
+               if (strcmp(pool_name, import->poolname) == 0)
+                       found = 1;
+       } else {
+               uint64_t pool_guid;
+
+               verify(nvlist_lookup_uint64(zhp->zpool_config,
+                   ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
+               if (pool_guid == import->guid)
+                       found = 1;
+       }
+
+       zpool_close(zhp);
+       return (found);
+}
+
+nvlist_t *
+zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
+{
+       verify(import->poolname == NULL || import->guid == 0);
+
+       if (import->unique)
+               import->exists = zpool_iter(hdl, name_or_guid_exists, import);
+
+       if (import->cachefile != NULL)
+               return (zpool_find_import_cached(hdl, import->cachefile,
+                   import->poolname, import->guid));
+
+       return (zpool_find_import_impl(hdl, import));
+}
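
The removed zpool_find_import_by*() entry points collapse into this one call; a minimal caller sketch (pool name hypothetical, hdl an open libzfs_handle_t):

    importargs_t iarg = { 0 };
    nvlist_t *pools;

    iarg.poolname = "tank";     /* or set iarg.guid; never both */
    iarg.unique = B_TRUE;       /* sets iarg.exists if already imported */

    pools = zpool_search_import(hdl, &iarg);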
 
 boolean_t
 find_guid(nvlist_t *nv, uint64_t guid)
@@ -1192,6 +1494,17 @@ zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
 
        switch (stateval) {
        case POOL_STATE_EXPORTED:
+               /*
+                * A pool with an exported state may in fact be imported
+                * read-only, so check the in-core state to see if it's
+                * active and imported read-only.  If it is, set
+                * its state to active.
+                */
+               if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
+                   (zhp = zpool_open_canfail(hdl, name)) != NULL &&
+                   zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
+                       stateval = POOL_STATE_ACTIVE;
+
                ret = B_TRUE;
                break;