 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)libzfs_import.c	1.24	08/04/08 SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
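
/*
 * Editor's illustration (not part of the original source): the structures
 * above nest exactly like the "pool guid -> toplevel vdev guid -> label txg"
 * hierarchy described at the top of this file, so they are walked with three
 * nested loops, as get_configs() does:
 *
 *	pool_entry_t *pe;
 *	vdev_entry_t *ve;
 *	config_entry_t *ce;
 *
 *	for (pe = pl->pools; pe != NULL; pe = pe->pe_next)
 *		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next)
 *			for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next)
 *				(pick the config with the highest ce_txg)
 */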

static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
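
/*
 * Editor's note (illustrative, not part of the original source): the string
 * returned by get_devid() comes from devid_str_encode() and must be released
 * with devid_str_free() once it has been copied or is no longer needed, e.g.:
 *
 *	char *devid;
 *
 *	if ((devid = get_devid("/dev/dsk/c0t0d0s0")) != NULL) {
 *		(hypothetical path; store or compare the encoded devid here)
 *		devid_str_free(devid);
 *	}
 *
 * fix_paths() below follows exactly this pattern after storing the string in
 * the vdev nvlist.
 */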

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}
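
/*
 * Worked example (editor's addition, not part of the original source): suppose
 * a label recorded ZPOOL_CONFIG_PATH as "/dev/dsk/c0t0d0s4", and the scan
 * found the same guid at "/dev/dsk/c1t2d0s4" and "/dev/dsk/c1t2d0s0" (two
 * slices of a relocated disk).  Counting matching characters from the end of
 * each string, the first candidate matches 4 ("d0s4") and the second matches
 * 0, so fix_paths() rewrites the path to "/dev/dsk/c1t2d0s4", preserving the
 * original slice number as the comment above describes.
 */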

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		nvlist_free(config);
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if its
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}
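
/*
 * Editor's illustration (not part of the original source): if two devices
 * carry labels for the same pool guid and the same top-level vdev guid but
 * different txgs, say 10 and 12, the vdev_entry_t ends up with one
 * config_entry_t per txg; get_configs() later keeps only the txg-12 config as
 * the best one for that vdev.  A third device whose label repeats txg 12
 * contributes nothing new here beyond its vdev guid -> path mapping.
 */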

/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);

	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		(void) zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot discover pools"));
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
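
/*
 * Editor's note (not part of the original source): refresh_config() uses the
 * usual libzfs pattern for ioctls whose reply size is unknown up front: pack
 * the config, allocate a destination buffer twice that size, and while
 * ZFS_IOC_POOL_TRYIMPORT fails with ENOMEM keep growing the buffer with
 * zcmd_expand_dst_nvlist() until the kernel's candidate config fits.
 */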

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t c, children = 0;
	nvlist_t **child = NULL;
	uint64_t id, state, hostid;
	boolean_t isactive;
	nvlist_t *nvl;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;

				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);

error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
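
/*
 * Worked example (editor's addition, not part of the original source): with
 * VDEV_LABELS == 4 and 'size' the label-aligned device size, the formula
 * above places the labels at
 *
 *	label 0:  0
 *	label 1:  sizeof (vdev_label_t)
 *	label 2:  size - 2 * sizeof (vdev_label_t)
 *	label 3:  size - sizeof (vdev_label_t)
 *
 * i.e. two copies at the front of the device and two at the back, so damage
 * confined to either end still leaves readable labels.
 */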

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
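
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source; the device path is only an example):
 *
 *	nvlist_t *config;
 *	int fd;
 *
 *	if ((fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY)) >= 0) {
 *		if (zpool_read_label(fd, &config) == 0 && config != NULL) {
 *			(inspect ZPOOL_CONFIG_POOL_NAME, POOL_STATE, ...)
 *			nvlist_free(config);
 *		}
 *		(void) close(fd);
 *	}
 */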

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 */
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char rdsk[MAXPATHLEN];
	char *end;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd, dfd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {

		/* use realpath to normalize the path */
		if (realpath(argv[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			(void) strlcpy(rdsk, "/dev/rdsk/", sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files, character devs and block devs.
			 */
			if (fstat64(fd, &statbuf) != 0 ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISCHR(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))) {
				(void) close(fd);
				continue;
			}

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, active_ok);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
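
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the nvlist returned above maps pool names to importable configs,
 * so callers typically walk it the same way zpool_find_import_cached() walks
 * its cache file below:
 *
 *	nvpair_t *elem = NULL;
 *	nvlist_t *config;
 *
 *	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *		verify(nvpair_value_nvlist(elem, &config) == 0);
 *		(nvpair_name(elem) is the pool name)
 *	}
 */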

/*
 * Given a cache file, return the contents as a list of importable pools.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    boolean_t active_ok)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t guid;
	boolean_t active;

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (!active) {
			if ((dst = refresh_config(hdl, src)) == NULL) {
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}

			if (nvlist_add_nvlist(pools, nvpair_name(elem), dst)
			    != 0) {
				(void) no_memory(hdl);
				nvlist_free(dst);
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}
			nvlist_free(dst);
		} else if (active_ok) {
			if (nvlist_add_nvlist(pools, nvpair_name(elem), src)
			    != 0) {
				(void) no_memory(hdl);
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}
		}
	}

	nvlist_free(raw);

	return (pools);
}
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	nvlist_t *nvroot;
	uint_t i, count;
	uint64_t guid;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
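
/*
 * Editor's note (not part of the original source): find_aux() is a
 * zpool_iter() callback; returning nonzero stops the iteration, so a lookup
 * for an in-use hot spare reads
 *
 *	cb.cb_guid = vdev_guid;
 *	cb.cb_type = ZPOOL_CONFIG_SPARES;
 *	if (zpool_iter(hdl, find_aux, &cb) == 1)
 *		(cb.cb_zhp is the pool holding the device)
 *
 * which is exactly how zpool_in_use() uses it below for both spares and
 * l2cache devices.
 */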

/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  Both strings are allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;

	return (0);
}
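
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source; the name of the final boolean out-parameter is assumed here):
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(the device belongs to pool 'name' in state 'state')
 *		free(name);
 *	}
 */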