Update spare and cache device names on import
zfs.git: lib/libzfs/libzfs_import.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>

#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
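/*
 * The pool_entry -> vdev_entry -> config_entry lists below mirror the
 * pool guid -> toplevel vdev guid -> label txg hierarchy described above.
 * The name_entry list records every vdev guid -> device path mapping
 * observed while scanning, so stale paths can be fixed up later.
 */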
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        uint64_t                ne_order;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

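/*
 * Return the devid string for the device at 'path', or NULL if the device
 * has no devid.  The returned string must be freed with devid_str_free().
 */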
static char *
get_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping partitions or multiple paths to the
         * same disk.  In this case we prefer to use the path name which
         * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
         * use the lowest order device which corresponds to the first match
         * while traversing the ZPOOL_IMPORT_PATH search path.
         */
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        if ((strlen(path) == strlen(ne->ne_name)) &&
                            !strncmp(path, ne->ne_name, strlen(path))) {
                                best = ne;
                                break;
                        }

                        if (best == NULL || ne->ne_order < best->ne_order)
                                best = ne;
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
                        return (-1);
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_order = order;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.
         * If so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_order = order;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}

/*
 * Determine if the pool with the given name is currently active, and check
 * whether its pool GUID matches the given GUID.  The result is returned in
 * 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

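/*
 * Ask the kernel to "try" importing the given pool config via
 * ZFS_IOC_POOL_TRYIMPORT and return the refreshed config it produces,
 * growing the destination buffer as needed.  Returns NULL on failure.
 */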
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        int c;

        for (c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname, *comment;
        uint64_t version, guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION, &version) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
                                if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name) != 0)
                                        goto nomem;

                                /*
                                 * COMMENT is optional; if it's not there,
                                 * set it to NULL rather than bailing.
                                 */
                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) != 0)
                                        comment = NULL;
                                else if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_COMMENT, comment) != 0)
                                        goto nomem;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state) != 0)
                                        goto nomem;

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        if (nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid) != 0)
                                                goto nomem;
                                        verify(nvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            &hostname) == 0);
                                        if (nvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            hostname) != 0)
                                                goto nomem;
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0)
                                        goto nomem;
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.  Labels 0 and 1 are stored at the
 * start of the device, while labels 2 and 3 sit at the end.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
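/*
 * A typical calling sequence, as a sketch (error handling elided, and the
 * device path below is only an example):
 *
 *      int fd;
 *      nvlist_t *config;
 *
 *      if ((fd = open("/dev/sda1", O_RDONLY)) >= 0) {
 *              if (zpool_read_label(fd, &config) == 0 && config != NULL) {
 *                      (inspect ZPOOL_CONFIG_POOL_NAME, etc.)
 *                      nvlist_free(config);
 *              }
 *              (void) close(fd);
 *      }
 */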
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}

#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
        blkid_cache cache;
        blkid_dev_iterate iter;
        blkid_dev dev;
        const char *devname;
        nvlist_t *config;
        int fd, err;

        err = blkid_get_cache(&cache, NULL);
        if (err != 0) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
                goto err_blkid1;
        }

        err = blkid_probe_all(cache);
        if (err != 0) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
                goto err_blkid2;
        }

        iter = blkid_dev_iterate_begin(cache);
        if (iter == NULL) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
                err = -1;
                goto err_blkid2;
        }

        err = blkid_dev_set_search(iter, "TYPE", "zfs");
        if (err != 0) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
                goto err_blkid3;
        }

        while (blkid_dev_next(iter, &dev) == 0) {
                devname = blkid_dev_devname(dev);
                if ((fd = open64(devname, O_RDONLY)) < 0)
                        continue;

                err = zpool_read_label(fd, &config);
                (void) close(fd);

                if (err != 0) {
                        (void) no_memory(hdl);
                        goto err_blkid3;
                }

                if (config != NULL) {
                        err = add_config(hdl, pools, devname, 0, config);
                        if (err != 0)
                                goto err_blkid3;
                }
        }

err_blkid3:
        blkid_dev_iterate_end(iter);
err_blkid2:
        blkid_put_cache(cache);
err_blkid1:
        return (err);
}
#endif /* HAVE_LIBBLKID */

char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
        "/dev/disk/by-vdev",    /* Custom rules, use first if they exist */
        "/dev/disk/zpool",      /* Custom rules, use first if they exist */
        "/dev/mapper",          /* Use multipath devices before components */
        "/dev/disk/by-uuid",    /* Single unique entry and persistent */
        "/dev/disk/by-id",      /* May be multiple entries and persistent */
        "/dev/disk/by-path",    /* Encodes physical location and persistent */
        "/dev/disk/by-label",   /* Custom persistent labels */
        "/dev"                  /* UNSAFE device names will change */
};

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), libblkid is used when available, falling back to the
 * default directories in zpool_default_import_path.  poolname or guid (but
 * not both) are provided by the caller when trying to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        struct stat64 statbuf;
        nvlist_t *ret = NULL, *config;
        int fd;
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;

        verify(iarg->poolname == NULL || iarg->guid == 0);

        if (dirs == 0) {
#ifdef HAVE_LIBBLKID
                /* Use libblkid to scan all devices for their type */
                if (zpool_find_import_blkid(hdl, &pools) == 0)
                        goto skip_scanning;

                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid failure falling back "
                    "to manual probing"));
#endif /* HAVE_LIBBLKID */

                dir = zpool_default_import_path;
                dirs = DEFAULT_IMPORT_PATH_SIZE;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                char *rdsk;
                int dfd;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == 0) {

                        /* it is safe to skip missing search paths */
                        if (errno == ENOENT)
                                continue;

                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/rdsk/";
                else
                        rdsk = path;

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        /*
                         * Skip checking devices with well known prefixes:
                         * watchdog - A special close is required to avoid
                         *            triggering it and resetting the system.
                         * fuse     - Fuse control device.
                         * ppp      - Generic PPP driver.
                         * tty*     - Generic serial interface.
                         * vcs*     - Virtual console memory.
                         * parport* - Parallel port interface.
                         * lp*      - Printer interface.
                         * fd*      - Floppy interface.
                         * hpet     - High Precision Event Timer, crashes qemu
                         *            when accessed from a virtual machine.
                         * core     - Symlink to /proc/kcore, causes a crash
                         *            when accessed from Xen dom0.
                         */
                        if ((strncmp(name, "watchdog", 8) == 0) ||
                            (strncmp(name, "fuse", 4) == 0)     ||
                            (strncmp(name, "ppp", 3) == 0)      ||
                            (strncmp(name, "tty", 3) == 0)      ||
                            (strncmp(name, "vcs", 3) == 0)      ||
                            (strncmp(name, "parport", 7) == 0)  ||
                            (strncmp(name, "lp", 2) == 0)       ||
                            (strncmp(name, "fd", 2) == 0)       ||
                            (strncmp(name, "hpet", 4) == 0)     ||
                            (strncmp(name, "core", 4) == 0))
                                continue;

                        /*
                         * Ignore failed stats.  We only want regular
                         * files and block devices.
                         */
                        if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
                            (!S_ISREG(statbuf.st_mode) &&
                            !S_ISBLK(statbuf.st_mode)))
                                continue;

                        if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
                                continue;

                        if ((zpool_read_label(fd, &config)) != 0) {
                                (void) close(fd);
                                (void) no_memory(hdl);
                                goto error;
                        }

                        (void) close(fd);

                        if (config != NULL) {
                                boolean_t matched = B_TRUE;
                                char *pname;

                                if ((iarg->poolname != NULL) &&
                                    (nvlist_lookup_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {

                                        if (strcmp(iarg->poolname, pname))
                                                matched = B_FALSE;

                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                        config = NULL;
                                        continue;
                                }
                                /* use the non-raw path for the config */
                                (void) strlcpy(end, name, pathleft);
                                if (add_config(hdl, &pools, path, i+1, config))
                                        goto error;
                        }
                }

                (void) closedir(dirp);
                dirp = NULL;
        }

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
        ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                if (ce->ce_config)
                                        nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                if (ne->ne_name)
                        free(ne->ne_name);
                free(ne);
        }

        if (dirp)
                (void) closedir(dirp);

        return (ret);
}

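/*
 * Search the given list of directories for all importable pools; this is
 * the argc/argv front end to zpool_find_import_impl() above.
 */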
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        importargs_t iarg = { 0 };

        iarg.paths = argc;
        iarg.path = argv;

        return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

        /*
         * Go through and get the current state of the pools and refresh their
         * state.
         */
        if (nvlist_alloc(&pools, 0, 0) != 0) {
                (void) no_memory(hdl);
                nvlist_free(raw);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
                verify(nvpair_value_nvlist(elem, &src) == 0);

                verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (poolname != NULL && strcmp(poolname, name) != 0)
                        continue;

                verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
                    &this_guid) == 0);
                if (guid != 0 && guid != this_guid)
                        continue;

                if (pool_active(hdl, name, this_guid, &active) != 0) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (active)
                        continue;

                if ((dst = refresh_config(hdl, src)) == NULL) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
                        (void) no_memory(hdl);
                        nvlist_free(dst);
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }
                nvlist_free(dst);
        }

        nvlist_free(raw);
        return (pools);
}

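/*
 * zpool_iter() callback: check whether an active pool already uses the
 * name or guid requested in the importargs_t passed via 'data'.
 */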
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
        importargs_t *import = data;
        int found = 0;

        if (import->poolname != NULL) {
                char *pool_name;

                verify(nvlist_lookup_string(zhp->zpool_config,
                    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
                if (strcmp(pool_name, import->poolname) == 0)
                        found = 1;
        } else {
                uint64_t pool_guid;

                verify(nvlist_lookup_uint64(zhp->zpool_config,
                    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
                if (pool_guid == import->guid)
                        found = 1;
        }

        zpool_close(zhp);
        return (found);
}

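/*
 * Top-level import search: optionally record whether the requested name or
 * guid is already in use (import->exists), then search either the given
 * cache file or the device directories for importable pools.
 */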
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
        verify(import->poolname == NULL || import->guid == 0);

        if (import->unique)
                import->exists = zpool_iter(hdl, name_or_guid_exists, import);

        if (import->cachefile != NULL)
                return (zpool_find_import_cached(hdl, import->cachefile,
                    import->poolname, import->guid));

        return (zpool_find_import_impl(hdl, import));
}

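/*
 * Recursively search the given vdev tree for a vdev with the given guid.
 */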
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
        uint64_t tmp;
        nvlist_t **child;
        uint_t c, children;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
        if (tmp == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_guid(child[c], guid))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

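/*
 * Callback data used when searching all pools for an auxiliary (spare or
 * l2cache) device by guid.
 */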
typedef struct aux_cbdata {
        const char      *cb_type;
        uint64_t        cb_guid;
        zpool_handle_t  *cb_zhp;
} aux_cbdata_t;

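/*
 * zpool_iter() callback.  Returns 1 when the pool contains an auxiliary
 * vdev (of type cb_type) whose guid matches cb_guid, leaving the pool
 * handle in cb_zhp for the caller to close.  Otherwise the handle is
 * closed here and 0 is returned so that iteration continues.
 */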
static int
find_aux(zpool_handle_t *zhp, void *data)
{
        aux_cbdata_t *cbp = data;
        nvlist_t **list;
        uint_t i, count;
        uint64_t guid;
        nvlist_t *nvroot;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
            &list, &count) == 0) {
                for (i = 0; i < count; i++) {
                        verify(nvlist_lookup_uint64(list[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (guid == cbp->cb_guid) {
                                cbp->cb_zhp = zhp;
                                return (1);
                        }
                }
        }

        zpool_close(zhp);
        return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true along with the
 * state of the pool and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
        nvlist_t *config;
        char *name;
        boolean_t ret;
        uint64_t guid, vdev_guid;
        zpool_handle_t *zhp;
        nvlist_t *pool_config;
        uint64_t stateval, isspare;
        aux_cbdata_t cb = { 0 };
        boolean_t isactive;

        *inuse = B_FALSE;

        if (zpool_read_label(fd, &config) != 0) {
                (void) no_memory(hdl);
                return (-1);
        }

        if (config == NULL)
                return (0);

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) == 0);

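        /*
         * Spare and l2cache labels do not carry a pool name or pool guid,
         * so only look them up for other pool states.
         */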
        if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
        }

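        /*
         * The on-disk pool state determines which in-use check is
         * appropriate below.
         */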
        switch (stateval) {
        case POOL_STATE_EXPORTED:
                /*
                 * A pool with an exported state may in fact be imported
                 * read-only, so check the in-core state to see if it's
                 * active and imported read-only.  If it is, set
                 * its state to active.
                 */
                if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
                    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
                    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
                        stateval = POOL_STATE_ACTIVE;

                ret = B_TRUE;
                break;

        case POOL_STATE_ACTIVE:
                /*
                 * For an active pool, we have to determine if it's really part
                 * of a currently active pool (in which case the pool will
                 * exist and the guid will be the same), or whether it's part
                 * of an active pool that was disconnected without being
                 * explicitly exported.
                 */
                if (pool_active(hdl, name, guid, &isactive) != 0) {
                        nvlist_free(config);
                        return (-1);
                }

                if (isactive) {
                        /*
                         * Because the device may have been removed while
                         * offlined, we only report it as active if the vdev is
                         * still present in the config.  Otherwise, pretend like
                         * it's not in use.
                         */
                        if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
                            (pool_config = zpool_get_config(zhp, NULL))
                            != NULL) {
                                nvlist_t *nvroot;

                                verify(nvlist_lookup_nvlist(pool_config,
                                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                                ret = find_guid(nvroot, vdev_guid);
                        } else {
                                ret = B_FALSE;
                        }

                        /*
                         * If this is an active spare within another pool, we
                         * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
                         */
                        if (ret && nvlist_lookup_uint64(config,
                            ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
                                stateval = POOL_STATE_SPARE;

                        if (zhp != NULL)
                                zpool_close(zhp);
                } else {
                        stateval = POOL_STATE_POTENTIALLY_ACTIVE;
                        ret = B_TRUE;
                }
                break;

        case POOL_STATE_SPARE:
                /*
                 * A hot spare can be either definitively in use or potentially
                 * active.  To determine if it's in use, we iterate over all
                 * pools in the system and search for one with a spare with a
                 * matching guid.
                 *
                 * Due to the shared nature of spares, we don't actually report
                 * the potentially active case as in use.  This means the user
                 * can freely create pools on the hot spares of exported pools,
                 * but to do otherwise makes the resulting code complicated, and
                 * we end up having to deal with this case anyway.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_SPARES;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        case POOL_STATE_L2CACHE:
                /*
                 * Check if any pool is currently using this l2cache device.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_L2CACHE;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        default:
                ret = B_FALSE;
        }

        if (ret) {
                if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
                        if (cb.cb_zhp)
                                zpool_close(cb.cb_zhp);
                        nvlist_free(config);
                        return (-1);
                }
                *state = (pool_state_t)stateval;
        }

        if (cb.cb_zhp)
                zpool_close(cb.cb_zhp);

        nvlist_free(config);
        *inuse = ret;
        return (0);
}