Fix zpool_read_label()
[zfs.git] / lib / libzfs / libzfs_import.c
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24  * Copyright (c) 2012 by Delphix. All rights reserved.
25  */
26
27 /*
28  * Pool import support functions.
29  *
30  * To import a pool, we rely on reading the configuration information from the
31  * ZFS label of each device.  If we successfully read the label, then we
32  * organize the configuration information in the following hierarchy:
33  *
34  *      pool guid -> toplevel vdev guid -> label txg
35  *
36  * Duplicate entries matching this same tuple will be discarded.  Once we have
37  * examined every device, we pick the best label txg config for each toplevel
38  * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
39  * update any paths that have changed.  Finally, we attempt to import the pool
40  * using our derived config, and record the results.
41  */
42
43 #include <ctype.h>
44 #include <devid.h>
45 #include <dirent.h>
46 #include <errno.h>
47 #include <libintl.h>
48 #include <stddef.h>
49 #include <stdlib.h>
50 #include <string.h>
51 #include <sys/stat.h>
52 #include <unistd.h>
53 #include <fcntl.h>
54 #include <sys/vtoc.h>
55 #include <sys/dktp/fdisk.h>
56 #include <sys/efi_partition.h>
57
58 #include <sys/vdev_impl.h>
59 #ifdef HAVE_LIBBLKID
60 #include <blkid/blkid.h>
61 #endif
62
63 #include "libzfs.h"
64 #include "libzfs_impl.h"
65
66 /*
67  * Intermediate structures used to gather configuration information.
68  */
69 typedef struct config_entry {
70         uint64_t                ce_txg;
71         nvlist_t                *ce_config;
72         struct config_entry     *ce_next;
73 } config_entry_t;
74
75 typedef struct vdev_entry {
76         uint64_t                ve_guid;
77         config_entry_t          *ve_configs;
78         struct vdev_entry       *ve_next;
79 } vdev_entry_t;
80
81 typedef struct pool_entry {
82         uint64_t                pe_guid;
83         vdev_entry_t            *pe_vdevs;
84         struct pool_entry       *pe_next;
85 } pool_entry_t;
86
87 typedef struct name_entry {
88         char                    *ne_name;
89         uint64_t                ne_guid;
90         uint64_t                ne_order;
91         struct name_entry       *ne_next;
92 } name_entry_t;
93
94 typedef struct pool_list {
95         pool_entry_t            *pools;
96         name_entry_t            *names;
97 } pool_list_t;
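/*
 * Illustrative sketch: the structures above form the hierarchy described
 * in the file header (pool guid -> toplevel vdev guid -> label txg), and
 * are walked with three nested list traversals, as get_configs() and the
 * cleanup path in zpool_find_import_impl() do below:
 *
 *	for (pe = pl->pools; pe != NULL; pe = pe->pe_next)
 *		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next)
 *			for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next)
 *				... pe->pe_guid, ve->ve_guid, ce->ce_txg ...
 */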
98
99 static char *
100 get_devid(const char *path)
101 {
102         int fd;
103         ddi_devid_t devid;
104         char *minor, *ret;
105
106         if ((fd = open(path, O_RDONLY)) < 0)
107                 return (NULL);
108
109         minor = NULL;
110         ret = NULL;
111         if (devid_get(fd, &devid) == 0) {
112                 if (devid_get_minor_name(fd, &minor) == 0)
113                         ret = devid_str_encode(devid, minor);
114                 if (minor != NULL)
115                         devid_str_free(minor);
116                 devid_free(devid);
117         }
118         (void) close(fd);
119
120         return (ret);
121 }
122
123
124 /*
125  * Go through and fix up any path and/or devid information for the given vdev
126  * configuration.
127  */
128 static int
129 fix_paths(nvlist_t *nv, name_entry_t *names)
130 {
131         nvlist_t **child;
132         uint_t c, children;
133         uint64_t guid;
134         name_entry_t *ne, *best;
135         char *path, *devid;
136
137         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
138             &child, &children) == 0) {
139                 for (c = 0; c < children; c++)
140                         if (fix_paths(child[c], names) != 0)
141                                 return (-1);
142                 return (0);
143         }
144
145         /*
146          * This is a leaf (file or disk) vdev.  In either case, go through
147          * the name list and see if we find a matching guid.  If so, replace
148          * the path and see if we can calculate a new devid.
149          *
150          * There may be multiple names associated with a particular guid, in
151          * which case we have overlapping partitions or multiple paths to the
152          * same disk.  In this case we prefer to use the path name which
153          * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
154          * use the lowest order device which corresponds to the first match
155          * while traversing the ZPOOL_IMPORT_PATH search path.
156          */
157         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
158         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
159                 path = NULL;
160
161         best = NULL;
162         for (ne = names; ne != NULL; ne = ne->ne_next) {
163                 if (ne->ne_guid == guid) {
164
165                         if (path == NULL) {
166                                 best = ne;
167                                 break;
168                         }
169
170                         if ((strlen(path) == strlen(ne->ne_name)) &&
171                             !strncmp(path, ne->ne_name, strlen(path))) {
172                                 best = ne;
173                                 break;
174                         }
175
176                         if (best == NULL || ne->ne_order < best->ne_order)
177                                 best = ne;
178                 }
179         }
180
181         if (best == NULL)
182                 return (0);
183
184         if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
185                 return (-1);
186
187         if ((devid = get_devid(best->ne_name)) == NULL) {
188                 (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
189         } else {
190                 if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
191                         return (-1);
192                 devid_str_free(devid);
193         }
194
195         return (0);
196 }
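/*
 * Worked example of the matching policy above (the paths are
 * hypothetical): suppose a leaf vdev's label records the path
 * "/dev/disk/by-id/ata-DISK-part1", and the name list holds two entries
 * for its guid, "/dev/disk/by-id/ata-DISK-part1" (ne_order 3) and
 * "/dev/sdb1" (ne_order 6).  The exact ZPOOL_CONFIG_PATH match wins
 * regardless of order.  Had neither entry matched the recorded path,
 * the entry with the lowest ne_order, i.e. the one found earliest along
 * the ZPOOL_IMPORT_PATH search path, would be chosen instead.
 */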
197
198 /*
199  * Add the given configuration to the list of known devices.
200  */
201 static int
202 add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
203     int order, nvlist_t *config)
204 {
205         uint64_t pool_guid, vdev_guid, top_guid, txg, state;
206         pool_entry_t *pe;
207         vdev_entry_t *ve;
208         config_entry_t *ce;
209         name_entry_t *ne;
210
211         /*
212          * If this is a hot spare not currently in use, or a level 2 cache
213          * device, add it to the list of names to translate, but don't do
214          * anything else.
215          */
216         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
217             &state) == 0 &&
218             (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
219             nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
220                 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
221                         return (-1);
222
223                 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
224                         free(ne);
225                         return (-1);
226                 }
227                 ne->ne_guid = vdev_guid;
228                 ne->ne_order = order;
229                 ne->ne_next = pl->names;
230                 pl->names = ne;
231                 return (0);
232         }
233
234         /*
235          * If we have a valid config but cannot read any of these fields, then
236          * it means we have a half-initialized label.  In vdev_label_init()
237          * we write a label with txg == 0 so that we can identify the device
238          * in case the user refers to the same disk later on.  If we fail to
239          * create the pool, we'll be left with a label in this state
240          * which should not be considered part of a valid pool.
241          */
242         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
243             &pool_guid) != 0 ||
244             nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
245             &vdev_guid) != 0 ||
246             nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
247             &top_guid) != 0 ||
248             nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
249             &txg) != 0 || txg == 0) {
250                 nvlist_free(config);
251                 return (0);
252         }
253
254         /*
255          * First, see if we know about this pool.  If not, then add it to the
256          * list of known pools.
257          */
258         for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
259                 if (pe->pe_guid == pool_guid)
260                         break;
261         }
262
263         if (pe == NULL) {
264                 if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
265                         nvlist_free(config);
266                         return (-1);
267                 }
268                 pe->pe_guid = pool_guid;
269                 pe->pe_next = pl->pools;
270                 pl->pools = pe;
271         }
272
273         /*
274          * Second, see if we know about this toplevel vdev.  Add it if it's
275          * missing.
276          */
277         for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
278                 if (ve->ve_guid == top_guid)
279                         break;
280         }
281
282         if (ve == NULL) {
283                 if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
284                         nvlist_free(config);
285                         return (-1);
286                 }
287                 ve->ve_guid = top_guid;
288                 ve->ve_next = pe->pe_vdevs;
289                 pe->pe_vdevs = ve;
290         }
291
292         /*
293          * Third, see if we have a config with a matching transaction group.  If
294          * so, then we do nothing.  Otherwise, add it to the list of known
295          * configs.
296          */
297         for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
298                 if (ce->ce_txg == txg)
299                         break;
300         }
301
302         if (ce == NULL) {
303                 if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
304                         nvlist_free(config);
305                         return (-1);
306                 }
307                 ce->ce_txg = txg;
308                 ce->ce_config = config;
309                 ce->ce_next = ve->ve_configs;
310                 ve->ve_configs = ce;
311         } else {
312                 nvlist_free(config);
313         }
314
315         /*
316          * At this point we've successfully added our config to the list of
317          * known configs.  The last thing to do is add the vdev guid -> path
318          * mappings so that we can fix up the configuration as necessary before
319          * doing the import.
320          */
321         if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
322                 return (-1);
323
324         if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
325                 free(ne);
326                 return (-1);
327         }
328
329         ne->ne_guid = vdev_guid;
330         ne->ne_order = order;
331         ne->ne_next = pl->names;
332         pl->names = ne;
333
334         return (0);
335 }
336
337 /*
338  * Determine if the named pool is active and matches the given GUID.
339  */
340 static int
341 pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
342     boolean_t *isactive)
343 {
344         zpool_handle_t *zhp;
345         uint64_t theguid;
346
347         if (zpool_open_silent(hdl, name, &zhp) != 0)
348                 return (-1);
349
350         if (zhp == NULL) {
351                 *isactive = B_FALSE;
352                 return (0);
353         }
354
355         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
356             &theguid) == 0);
357
358         zpool_close(zhp);
359
360         *isactive = (theguid == guid);
361         return (0);
362 }
363
364 static nvlist_t *
365 refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
366 {
367         nvlist_t *nvl;
368         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
369         int err;
370
371         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
372                 return (NULL);
373
374         if (zcmd_alloc_dst_nvlist(hdl, &zc,
375             zc.zc_nvlist_conf_size * 2) != 0) {
376                 zcmd_free_nvlists(&zc);
377                 return (NULL);
378         }
379
380         while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
381             &zc)) != 0 && errno == ENOMEM) {
382                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
383                         zcmd_free_nvlists(&zc);
384                         return (NULL);
385                 }
386         }
387
388         if (err) {
389                 zcmd_free_nvlists(&zc);
390                 return (NULL);
391         }
392
393         if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
394                 zcmd_free_nvlists(&zc);
395                 return (NULL);
396         }
397
398         zcmd_free_nvlists(&zc);
399         return (nvl);
400 }
401
402 /*
403  * Determine if the vdev id is a hole in the namespace.
404  */
405 boolean_t
406 vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
407 {
408         int c;
409
410         for (c = 0; c < holes; c++) {
411
412                 /* Top-level is a hole */
413                 if (hole_array[c] == id)
414                         return (B_TRUE);
415         }
416         return (B_FALSE);
417 }
418
419 /*
420  * Convert our list of pools into the definitive set of configurations.  We
421  * start by picking the best config for each toplevel vdev.  Once that's done,
422  * we assemble the toplevel vdevs into a full config for the pool.  We make a
423  * pass to fix up any incorrect paths, and then add it to the main list to
424  * return to the user.
425  */
426 static nvlist_t *
427 get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
428 {
429         pool_entry_t *pe;
430         vdev_entry_t *ve;
431         config_entry_t *ce;
432         nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
433         nvlist_t **spares, **l2cache;
434         uint_t i, nspares, nl2cache;
435         boolean_t config_seen;
436         uint64_t best_txg;
437         char *name, *hostname = NULL;
438         uint64_t guid;
439         uint_t children = 0;
440         nvlist_t **child = NULL;
441         uint_t holes = 0;
442         uint64_t *hole_array = NULL, max_id;
443         uint_t c;
444         boolean_t isactive;
445         uint64_t hostid;
446         nvlist_t *nvl;
447         boolean_t found_one = B_FALSE;
448         boolean_t valid_top_config = B_FALSE;
449
450         if (nvlist_alloc(&ret, 0, 0) != 0)
451                 goto nomem;
452
453         for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
454                 uint64_t id, max_txg = 0;
455
456                 if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
457                         goto nomem;
458                 config_seen = B_FALSE;
459
460                 /*
461                  * Iterate over all toplevel vdevs.  Grab the pool configuration
462                  * from the first one we find, and then go through the rest and
463                  * add them as necessary to the 'vdevs' member of the config.
464                  */
465                 for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
466
467                         /*
468                          * Determine the best configuration for this vdev by
469                          * selecting the config with the latest transaction
470                          * group.
471                          */
472                         best_txg = 0;
473                         for (ce = ve->ve_configs; ce != NULL;
474                             ce = ce->ce_next) {
475
476                                 if (ce->ce_txg > best_txg) {
477                                         tmp = ce->ce_config;
478                                         best_txg = ce->ce_txg;
479                                 }
480                         }
481
482                         /*
483                          * We rely on the fact that the max txg for the
484                          * pool will contain the most up-to-date information
485                          * about the valid top-levels in the vdev namespace.
486                          */
487                         if (best_txg > max_txg) {
488                                 (void) nvlist_remove(config,
489                                     ZPOOL_CONFIG_VDEV_CHILDREN,
490                                     DATA_TYPE_UINT64);
491                                 (void) nvlist_remove(config,
492                                     ZPOOL_CONFIG_HOLE_ARRAY,
493                                     DATA_TYPE_UINT64_ARRAY);
494
495                                 max_txg = best_txg;
496                                 hole_array = NULL;
497                                 holes = 0;
498                                 max_id = 0;
499                                 valid_top_config = B_FALSE;
500
501                                 if (nvlist_lookup_uint64(tmp,
502                                     ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
503                                         verify(nvlist_add_uint64(config,
504                                             ZPOOL_CONFIG_VDEV_CHILDREN,
505                                             max_id) == 0);
506                                         valid_top_config = B_TRUE;
507                                 }
508
509                                 if (nvlist_lookup_uint64_array(tmp,
510                                     ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
511                                     &holes) == 0) {
512                                         verify(nvlist_add_uint64_array(config,
513                                             ZPOOL_CONFIG_HOLE_ARRAY,
514                                             hole_array, holes) == 0);
515                                 }
516                         }
517
518                         if (!config_seen) {
519                                 /*
520                                  * Copy the relevant pieces of data to the pool
521                                  * configuration:
522                                  *
523                                  *      version
524                                  *      pool guid
525                                  *      name
526                                  *      comment (if available)
527                                  *      pool state
528                                  *      hostid (if available)
529                                  *      hostname (if available)
530                                  */
531                                 uint64_t state, version;
532                                 char *comment = NULL;
533
534                                 version = fnvlist_lookup_uint64(tmp,
535                                     ZPOOL_CONFIG_VERSION);
536                                 fnvlist_add_uint64(config,
537                                     ZPOOL_CONFIG_VERSION, version);
538                                 guid = fnvlist_lookup_uint64(tmp,
539                                     ZPOOL_CONFIG_POOL_GUID);
540                                 fnvlist_add_uint64(config,
541                                     ZPOOL_CONFIG_POOL_GUID, guid);
542                                 name = fnvlist_lookup_string(tmp,
543                                     ZPOOL_CONFIG_POOL_NAME);
544                                 fnvlist_add_string(config,
545                                     ZPOOL_CONFIG_POOL_NAME, name);
546
547                                 if (nvlist_lookup_string(tmp,
548                                     ZPOOL_CONFIG_COMMENT, &comment) == 0)
549                                         fnvlist_add_string(config,
550                                             ZPOOL_CONFIG_COMMENT, comment);
551
552                                 state = fnvlist_lookup_uint64(tmp,
553                                     ZPOOL_CONFIG_POOL_STATE);
554                                 fnvlist_add_uint64(config,
555                                     ZPOOL_CONFIG_POOL_STATE, state);
556
557                                 hostid = 0;
558                                 if (nvlist_lookup_uint64(tmp,
559                                     ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
560                                         fnvlist_add_uint64(config,
561                                             ZPOOL_CONFIG_HOSTID, hostid);
562                                         hostname = fnvlist_lookup_string(tmp,
563                                             ZPOOL_CONFIG_HOSTNAME);
564                                         fnvlist_add_string(config,
565                                             ZPOOL_CONFIG_HOSTNAME, hostname);
566                                 }
567
568                                 config_seen = B_TRUE;
569                         }
570
571                         /*
572                          * Add this top-level vdev to the child array.
573                          */
574                         verify(nvlist_lookup_nvlist(tmp,
575                             ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
576                         verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
577                             &id) == 0);
578
579                         if (id >= children) {
580                                 nvlist_t **newchild;
581
582                                 newchild = zfs_alloc(hdl, (id + 1) *
583                                     sizeof (nvlist_t *));
584                                 if (newchild == NULL)
585                                         goto nomem;
586
587                                 for (c = 0; c < children; c++)
588                                         newchild[c] = child[c];
589
590                                 free(child);
591                                 child = newchild;
592                                 children = id + 1;
593                         }
594                         if (nvlist_dup(nvtop, &child[id], 0) != 0)
595                                 goto nomem;
596
597                 }
598
599                 /*
600                  * If we have information about all the top-levels then
601                  * clean up the nvlist which we've constructed. This
602                  * means removing any extraneous devices that are
603                  * beyond the valid range, or extending the array with
604                  * entries for devices which appear to be missing.
605                  */
606                 if (valid_top_config) {
607                         if (max_id < children) {
608                                 for (c = max_id; c < children; c++)
609                                         nvlist_free(child[c]);
610                                 children = max_id;
611                         } else if (max_id > children) {
612                                 nvlist_t **newchild;
613
614                                 newchild = zfs_alloc(hdl, (max_id) *
615                                     sizeof (nvlist_t *));
616                                 if (newchild == NULL)
617                                         goto nomem;
618
619                                 for (c = 0; c < children; c++)
620                                         newchild[c] = child[c];
621
622                                 free(child);
623                                 child = newchild;
624                                 children = max_id;
625                         }
626                 }
627
628                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
629                     &guid) == 0);
630
631                 /*
632                  * The vdev namespace may contain holes as a result of
633                  * device removal. We must add them back into the vdev
634                  * tree before we process any missing devices.
635                  */
636                 if (holes > 0) {
637                         ASSERT(valid_top_config);
638
639                         for (c = 0; c < children; c++) {
640                                 nvlist_t *holey;
641
642                                 if (child[c] != NULL ||
643                                     !vdev_is_hole(hole_array, holes, c))
644                                         continue;
645
646                                 if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
647                                     0) != 0)
648                                         goto nomem;
649
650                                 /*
651                                  * Holes in the namespace are treated as
652                                  * "hole" top-level vdevs and have a
653                                  * special flag set on them.
654                                  */
655                                 if (nvlist_add_string(holey,
656                                     ZPOOL_CONFIG_TYPE,
657                                     VDEV_TYPE_HOLE) != 0 ||
658                                     nvlist_add_uint64(holey,
659                                     ZPOOL_CONFIG_ID, c) != 0 ||
660                                     nvlist_add_uint64(holey,
661                                     ZPOOL_CONFIG_GUID, 0ULL) != 0)
662                                         goto nomem;
663                                 child[c] = holey;
664                         }
665                 }
666
667                 /*
668                  * Look for any missing top-level vdevs.  If this is the case,
669                  * create a faked up 'missing' vdev as a placeholder.  We cannot
670                  * simply compress the child array, because the kernel performs
671                  * certain checks to make sure the vdev IDs match their location
672                  * in the configuration.
673                  */
674                 for (c = 0; c < children; c++) {
675                         if (child[c] == NULL) {
676                                 nvlist_t *missing;
677                                 if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
678                                     0) != 0)
679                                         goto nomem;
680                                 if (nvlist_add_string(missing,
681                                     ZPOOL_CONFIG_TYPE,
682                                     VDEV_TYPE_MISSING) != 0 ||
683                                     nvlist_add_uint64(missing,
684                                     ZPOOL_CONFIG_ID, c) != 0 ||
685                                     nvlist_add_uint64(missing,
686                                     ZPOOL_CONFIG_GUID, 0ULL) != 0) {
687                                         nvlist_free(missing);
688                                         goto nomem;
689                                 }
690                                 child[c] = missing;
691                         }
692                 }
693
694                 /*
695                  * Put all of this pool's top-level vdevs into a root vdev.
696                  */
697                 if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
698                         goto nomem;
699                 if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
700                     VDEV_TYPE_ROOT) != 0 ||
701                     nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
702                     nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
703                     nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
704                     child, children) != 0) {
705                         nvlist_free(nvroot);
706                         goto nomem;
707                 }
708
709                 for (c = 0; c < children; c++)
710                         nvlist_free(child[c]);
711                 free(child);
712                 children = 0;
713                 child = NULL;
714
715                 /*
716                  * Go through and fix up any paths and/or devids based on our
717                  * known list of vdev GUID -> path mappings.
718                  */
719                 if (fix_paths(nvroot, pl->names) != 0) {
720                         nvlist_free(nvroot);
721                         goto nomem;
722                 }
723
724                 /*
725                  * Add the root vdev to this pool's configuration.
726                  */
727                 if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
728                     nvroot) != 0) {
729                         nvlist_free(nvroot);
730                         goto nomem;
731                 }
732                 nvlist_free(nvroot);
733
734                 /*
735                  * zdb uses this path to report on active pools that were
736                  * imported or created using -R.
737                  */
738                 if (active_ok)
739                         goto add_pool;
740
741                 /*
742                  * Determine if this pool is currently active, in which case we
743                  * can't actually import it.
744                  */
745                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
746                     &name) == 0);
747                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
748                     &guid) == 0);
749
750                 if (pool_active(hdl, name, guid, &isactive) != 0)
751                         goto error;
752
753                 if (isactive) {
754                         nvlist_free(config);
755                         config = NULL;
756                         continue;
757                 }
758
759                 if ((nvl = refresh_config(hdl, config)) == NULL) {
760                         nvlist_free(config);
761                         config = NULL;
762                         continue;
763                 }
764
765                 nvlist_free(config);
766                 config = nvl;
767
768                 /*
769                  * Go through and update the paths for spares, now that we have
770                  * them.
771                  */
772                 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
773                     &nvroot) == 0);
774                 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
775                     &spares, &nspares) == 0) {
776                         for (i = 0; i < nspares; i++) {
777                                 if (fix_paths(spares[i], pl->names) != 0)
778                                         goto nomem;
779                         }
780                 }
781
782                 /*
783                  * Update the paths for l2cache devices.
784                  */
785                 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
786                     &l2cache, &nl2cache) == 0) {
787                         for (i = 0; i < nl2cache; i++) {
788                                 if (fix_paths(l2cache[i], pl->names) != 0)
789                                         goto nomem;
790                         }
791                 }
792
793                 /*
794                  * Restore the original information read from the actual label.
795                  */
796                 (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
797                     DATA_TYPE_UINT64);
798                 (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
799                     DATA_TYPE_STRING);
800                 if (hostid != 0) {
801                         verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
802                             hostid) == 0);
803                         verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
804                             hostname) == 0);
805                 }
806
807 add_pool:
808                 /*
809                  * Add this pool to the list of configs.
810                  */
811                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
812                     &name) == 0);
813                 if (nvlist_add_nvlist(ret, name, config) != 0)
814                         goto nomem;
815
816                 found_one = B_TRUE;
817                 nvlist_free(config);
818                 config = NULL;
819         }
820
821         if (!found_one) {
822                 nvlist_free(ret);
823                 ret = NULL;
824         }
825
826         return (ret);
827
828 nomem:
829         (void) no_memory(hdl);
830 error:
831         nvlist_free(config);
832         nvlist_free(ret);
833         for (c = 0; c < children; c++)
834                 nvlist_free(child[c]);
835         free(child);
836
837         return (NULL);
838 }
839
840 /*
841  * Return the offset of the given label.
842  */
843 static uint64_t
844 label_offset(uint64_t size, int l)
845 {
846         ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
847         return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
848             0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
849 }
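/*
 * Worked example, assuming the current on-disk layout (VDEV_LABELS == 4,
 * each label 256 KiB): for a label-aligned device size 'size',
 * label_offset() yields
 *
 *	l = 0:	0
 *	l = 1:	sizeof (vdev_label_t)
 *	l = 2:	size - 2 * sizeof (vdev_label_t)
 *	l = 3:	size - sizeof (vdev_label_t)
 *
 * i.e. two label copies at the front of the device and two at the back,
 * so damage confined to either end still leaves readable labels.
 */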
850
851 /*
852  * Given a file descriptor, read the label information and return an nvlist
853  * describing the configuration, if there is one.
854  */
855 int
856 zpool_read_label(int fd, nvlist_t **config)
857 {
858         struct stat64 statbuf;
859         int l;
860         vdev_label_t *label;
861         uint64_t state, txg, size;
862
863         *config = NULL;
864
865         if (fstat64_blk(fd, &statbuf) == -1)
866                 return (0);
867         size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
868
869         if ((label = malloc(sizeof (vdev_label_t))) == NULL)
870                 return (-1);
871
872         for (l = 0; l < VDEV_LABELS; l++) {
873                 if (pread64(fd, label, sizeof (vdev_label_t),
874                     label_offset(size, l)) != sizeof (vdev_label_t))
875                         continue;
876
877                 if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
878                     sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
879                         continue;
880
881                 if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
882                     &state) != 0 || state > POOL_STATE_L2CACHE) {
883                         nvlist_free(*config);
884                         continue;
885                 }
886
887                 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
888                     (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
889                     &txg) != 0 || txg == 0)) {
890                         nvlist_free(*config);
891                         continue;
892                 }
893
894                 free(label);
895                 return (0);
896         }
897
898         free(label);
899         *config = NULL;
900         return (0);
901 }
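/*
 * Usage sketch (illustrative only; the device path is hypothetical and
 * error handling is trimmed).  Note the convention above: a return of 0
 * with *config == NULL means no valid label was found, while -1
 * indicates an allocation failure.
 *
 *	nvlist_t *config;
 *	char *name;
 *	int fd = open("/dev/sda1", O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0 &&
 *	    config != NULL) {
 *		if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
 *		    &name) == 0)
 *			(void) printf("pool: %s\n", name);
 *		nvlist_free(config);
 *	}
 *	if (fd >= 0)
 *		(void) close(fd);
 */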
902
903 /*
904  * Given a file descriptor, clear (zero) the label information.  This function
905  * is used in the appliance stack as part of the ZFS sysevent module and
906  * to implement the "zpool labelclear" command.
907  */
908 int
909 zpool_clear_label(int fd)
910 {
911         struct stat64 statbuf;
912         int l;
913         vdev_label_t *label;
914         uint64_t size;
915
916         if (fstat64_blk(fd, &statbuf) == -1)
917                 return (0);
918         size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
919
920         if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
921                 return (-1);
922
923         for (l = 0; l < VDEV_LABELS; l++) {
924                 if (pwrite64(fd, label, sizeof (vdev_label_t),
925                     label_offset(size, l)) != sizeof (vdev_label_t))
926                         break;  /* free 'label' below before failing */
927         }
928
929         free(label);
930         return (l == VDEV_LABELS ? 0 : -1);
931 }
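/*
 * Usage sketch for the labelclear path (illustrative; the device path is
 * hypothetical).  The caller must have write access to the device:
 *
 *	int fd = open("/dev/sdb1", O_RDWR);
 *
 *	if (fd >= 0) {
 *		if (zpool_clear_label(fd) != 0)
 *			(void) fprintf(stderr, "failed to clear label\n");
 *		(void) close(fd);
 *	}
 */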
932
933 #ifdef HAVE_LIBBLKID
934 /*
935  * Use libblkid to quickly search for zfs devices
936  */
937 static int
938 zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
939 {
940         blkid_cache cache;
941         blkid_dev_iterate iter;
942         blkid_dev dev;
943         const char *devname;
944         nvlist_t *config;
945         int fd, err;
946
947         err = blkid_get_cache(&cache, NULL);
948         if (err != 0) {
949                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
950                     dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
951                 goto err_blkid1;
952         }
953
954         err = blkid_probe_all(cache);
955         if (err != 0) {
956                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
957                     dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
958                 goto err_blkid2;
959         }
960
961         iter = blkid_dev_iterate_begin(cache);
962         if (iter == NULL) {
963                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
964                     dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
965                 goto err_blkid2;
966         }
967
968         err = blkid_dev_set_search(iter, "TYPE", "zfs");
969         if (err != 0) {
970                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
971                     dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
972                 goto err_blkid3;
973         }
974
975         while (blkid_dev_next(iter, &dev) == 0) {
976                 devname = blkid_dev_devname(dev);
977                 if ((fd = open64(devname, O_RDONLY)) < 0)
978                         continue;
979
980                 err = zpool_read_label(fd, &config);
981                 (void) close(fd);
982
983                 if (err != 0) {
984                         (void) no_memory(hdl);
985                         goto err_blkid3;
986                 }
987
988                 if (config != NULL) {
989                         err = add_config(hdl, pools, devname, 0, config);
990                         if (err != 0)
991                                 goto err_blkid3;
992                 }
993         }
994
995 err_blkid3:
996         blkid_dev_iterate_end(iter);
997 err_blkid2:
998         blkid_put_cache(cache);
999 err_blkid1:
1000         return (err);
1001 }
1002 #endif /* HAVE_LIBBLKID */
1003
1004 char *
1005 zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
1006         "/dev/disk/by-vdev",    /* Custom rules, use first if they exist */
1007         "/dev/mapper",          /* Use multipath devices before components */
1008         "/dev/disk/by-uuid",    /* Single unique entry and persistent */
1009         "/dev/disk/by-id",      /* May be multiple entries and persistent */
1010         "/dev/disk/by-path",    /* Encodes physical location and persistent */
1011         "/dev/disk/by-label",   /* Custom persistent labels */
1012         "/dev"                  /* UNSAFE device names will change */
1013 };
1014
1015 /*
1016  * Given a list of directories to search, find all pools stored on disk.  This
1017  * includes partial pools which are not available to import.  If no args are
1018  * given (argc is 0), then the default import search paths are used.
1019  * poolname or guid (but not both) are provided by the caller when trying
1020  * to import a specific pool.
1021  */
1022 static nvlist_t *
1023 zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
1024 {
1025         int i, dirs = iarg->paths;
1026         DIR *dirp = NULL;
1027         struct dirent64 *dp;
1028         char path[MAXPATHLEN];
1029         char *end, **dir = iarg->path;
1030         size_t pathleft;
1031         struct stat64 statbuf;
1032         nvlist_t *ret = NULL, *config;
1033         int fd;
1034         pool_list_t pools = { 0 };
1035         pool_entry_t *pe, *penext;
1036         vdev_entry_t *ve, *venext;
1037         config_entry_t *ce, *cenext;
1038         name_entry_t *ne, *nenext;
1039
1040         verify(iarg->poolname == NULL || iarg->guid == 0);
1041
1042         if (dirs == 0) {
1043 #ifdef HAVE_LIBBLKID
1044                 /* Use libblkid to scan all devices for their type */
1045                 if (zpool_find_import_blkid(hdl, &pools) == 0)
1046                         goto skip_scanning;
1047
1048                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
1049                     dgettext(TEXT_DOMAIN, "blkid failure falling back "
1050                     "to manual probing"));
1051 #endif /* HAVE_LIBBLKID */
1052
1053                 dir = zpool_default_import_path;
1054                 dirs = DEFAULT_IMPORT_PATH_SIZE;
1055         }
1056
1057         /*
1058          * Go through and read the label configuration information from every
1059          * possible device, organizing the information according to pool GUID
1060          * and toplevel GUID.
1061          */
1062         for (i = 0; i < dirs; i++) {
1063                 char *rdsk;
1064                 int dfd;
1065
1066                 /* use realpath to normalize the path */
1067                 if (realpath(dir[i], path) == 0) {
1068
1069                         /* it is safe to skip missing search paths */
1070                         if (errno == ENOENT)
1071                                 continue;
1072
1073                         zfs_error_aux(hdl, strerror(errno));
1074                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1075                             dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
1076                         goto error;
1077                 }
1078                 end = &path[strlen(path)];
1079                 *end++ = '/';
1080                 *end = 0;
1081                 pathleft = &path[sizeof (path)] - end;
1082
1083                 /*
1084                  * Using raw devices instead of block devices when we're
1085                  * reading the labels skips a bunch of slow operations during
1086                  * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1087                  */
1088                 if (strcmp(path, "/dev/dsk/") == 0)
1089                         rdsk = "/dev/rdsk/";
1090                 else
1091                         rdsk = path;
1092
1093                 if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1094                     (dirp = fdopendir(dfd)) == NULL) {
1095                         zfs_error_aux(hdl, strerror(errno));
1096                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1097                             dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1098                             rdsk);
1099                         goto error;
1100                 }
1101
1102                 /*
1103                  * This is not MT-safe, but we have no MT consumers of libzfs
1104                  */
1105                 while ((dp = readdir64(dirp)) != NULL) {
1106                         const char *name = dp->d_name;
1107                         if (name[0] == '.' &&
1108                             (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1109                                 continue;
1110
1111                         /*
1112                          * Skip checking devices with well known prefixes:
1113                          * watchdog - A special close is required to avoid
1114                          *            triggering it and resetting the system.
1115                          * fuse     - Fuse control device.
1116                          * ppp      - Generic PPP driver.
1117                          * tty*     - Generic serial interface.
1118                          * vcs*     - Virtual console memory.
1119                          * parport* - Parallel port interface.
1120                          * lp*      - Printer interface.
1121                          * fd*      - Floppy interface.
1122                          * hpet     - High Precision Event Timer, crashes qemu
1123                          *            when accessed from a virtual machine.
1124                          * core     - Symlink to /proc/kcore, causes a crash
1125          *            when accessed from Xen dom0.
1126                          */
1127                         if ((strncmp(name, "watchdog", 8) == 0) ||
1128                             (strncmp(name, "fuse", 4) == 0)     ||
1129                             (strncmp(name, "ppp", 3) == 0)      ||
1130                             (strncmp(name, "tty", 3) == 0)      ||
1131                             (strncmp(name, "vcs", 3) == 0)      ||
1132                             (strncmp(name, "parport", 7) == 0)  ||
1133                             (strncmp(name, "lp", 2) == 0)       ||
1134                             (strncmp(name, "fd", 2) == 0)       ||
1135                             (strncmp(name, "hpet", 4) == 0)     ||
1136                             (strncmp(name, "core", 4) == 0))
1137                                 continue;
1138
1139                         /*
1140                          * Ignore failed stats.  We only want regular
1141                          * files and block devices.
1142                          */
1143                         if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
1144                             (!S_ISREG(statbuf.st_mode) &&
1145                             !S_ISBLK(statbuf.st_mode)))
1146                                 continue;
1147
1148                         if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
1149                                 continue;
1150
1151                         if ((zpool_read_label(fd, &config)) != 0) {
1152                                 (void) close(fd);
1153                                 (void) no_memory(hdl);
1154                                 goto error;
1155                         }
1156
1157                         (void) close(fd);
1158
1159                         if (config != NULL) {
1160                                 boolean_t matched = B_TRUE;
1161                                 char *pname;
1162
1163                                 if ((iarg->poolname != NULL) &&
1164                                     (nvlist_lookup_string(config,
1165                                     ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {
1166
1167                                         if (strcmp(iarg->poolname, pname))
1168                                                matched = B_FALSE;
1169
1170                                 } else if (iarg->guid != 0) {
1171                                         uint64_t this_guid;
1172
1173                                         matched = nvlist_lookup_uint64(config,
1174                                             ZPOOL_CONFIG_POOL_GUID,
1175                                             &this_guid) == 0 &&
1176                                             iarg->guid == this_guid;
1177                                 }
1178                                 if (!matched) {
1179                                         nvlist_free(config);
1180                                         config = NULL;
1181                                         continue;
1182                                 }
1183                                 /* use the non-raw path for the config */
1184                                 (void) strlcpy(end, name, pathleft);
1185                                 if (add_config(hdl, &pools, path, i+1, config))
1186                                         goto error;
1187                         }
1188                 }
1189
1190                 (void) closedir(dirp);
1191                 dirp = NULL;
1192         }
1193
1194 #ifdef HAVE_LIBBLKID
1195 skip_scanning:
1196 #endif
1197         ret = get_configs(hdl, &pools, iarg->can_be_active);
1198
1199 error:
1200         for (pe = pools.pools; pe != NULL; pe = penext) {
1201                 penext = pe->pe_next;
1202                 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1203                         venext = ve->ve_next;
1204                         for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1205                                 cenext = ce->ce_next;
1206                                 if (ce->ce_config)
1207                                         nvlist_free(ce->ce_config);
1208                                 free(ce);
1209                         }
1210                         free(ve);
1211                 }
1212                 free(pe);
1213         }
1214
1215         for (ne = pools.names; ne != NULL; ne = nenext) {
1216                 nenext = ne->ne_next;
1217                 if (ne->ne_name)
1218                         free(ne->ne_name);
1219                 free(ne);
1220         }
1221
1222         if (dirp)
1223                 (void) closedir(dirp);
1224
1225         return (ret);
1226 }
1227
1228 nvlist_t *
1229 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1230 {
1231         importargs_t iarg = { 0 };
1232
1233         iarg.paths = argc;
1234         iarg.path = argv;
1235
1236         return (zpool_find_import_impl(hdl, &iarg));
1237 }
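/*
 * Usage sketch (illustrative, given a valid libzfs_handle_t *hdl): scan
 * the default import paths and print each importable pool found.
 * Passing argc == 0 triggers the default search described above.
 *
 *	nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *	nvpair_t *elem = NULL;
 *
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *		(void) printf("found: %s\n", nvpair_name(elem));
 *	nvlist_free(pools);
 */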
1238
1239 /*
1240  * Given a cache file, return the contents as a list of importable pools.
1241  * poolname or guid (but not both) are provided by the caller when trying
1242  * to import a specific pool.
1243  */
1244 nvlist_t *
1245 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1246     char *poolname, uint64_t guid)
1247 {
1248         char *buf;
1249         int fd;
1250         struct stat64 statbuf;
1251         nvlist_t *raw, *src, *dst;
1252         nvlist_t *pools;
1253         nvpair_t *elem;
1254         char *name;
1255         uint64_t this_guid;
1256         boolean_t active;
1257
1258         verify(poolname == NULL || guid == 0);
1259
1260         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1261                 zfs_error_aux(hdl, "%s", strerror(errno));
1262                 (void) zfs_error(hdl, EZFS_BADCACHE,
1263                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1264                 return (NULL);
1265         }
1266
1267         if (fstat64(fd, &statbuf) != 0) {
1268                 zfs_error_aux(hdl, "%s", strerror(errno));
1269                 (void) close(fd);
1270                 (void) zfs_error(hdl, EZFS_BADCACHE,
1271                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1272                 return (NULL);
1273         }
1274
1275         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1276                 (void) close(fd);
1277                 return (NULL);
1278         }
1279
1280         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1281                 (void) close(fd);
1282                 free(buf);
1283                 (void) zfs_error(hdl, EZFS_BADCACHE,
1284                     dgettext(TEXT_DOMAIN,
1285                     "failed to read cache file contents"));
1286                 return (NULL);
1287         }
1288
1289         (void) close(fd);
1290
1291         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1292                 free(buf);
1293                 (void) zfs_error(hdl, EZFS_BADCACHE,
1294                     dgettext(TEXT_DOMAIN,
1295                     "invalid or corrupt cache file contents"));
1296                 return (NULL);
1297         }
1298
1299         free(buf);
1300
1301         /*
1302          * Go through and get the current state of the pools and refresh their
1303          * state.
1304          */
1305         if (nvlist_alloc(&pools, 0, 0) != 0) {
1306                 (void) no_memory(hdl);
1307                 nvlist_free(raw);
1308                 return (NULL);
1309         }
1310
1311         elem = NULL;
1312         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1313                 verify(nvpair_value_nvlist(elem, &src) == 0);
1314
1315                 verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
1316                     &name) == 0);
1317                 if (poolname != NULL && strcmp(poolname, name) != 0)
1318                         continue;
1319
1320                 /*
1321                  * The pool GUID looked up here is also needed below for
1322                  * the pool_active() check, so read it unconditionally.
1323                  */
1324                 verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
1325                     &this_guid) == 0);
1326                 if (guid != 0 && guid != this_guid)
1327                         continue;
1328
1329                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1330                         nvlist_free(raw);
1331                         nvlist_free(pools);
1332                         return (NULL);
1333                 }
1334
1335                 if (active)
1336                         continue;
1337
1338                 if ((dst = refresh_config(hdl, src)) == NULL) {
1339                         nvlist_free(raw);
1340                         nvlist_free(pools);
1341                         return (NULL);
1342                 }
1343
1344                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1345                         (void) no_memory(hdl);
1346                         nvlist_free(dst);
1347                         nvlist_free(raw);
1348                         nvlist_free(pools);
1349                         return (NULL);
1350                 }
1351                 nvlist_free(dst);
1352         }
1353
1354         nvlist_free(raw);
1355         return (pools);
1356 }
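/*
 * Usage sketch (illustrative; the cache file location shown is the
 * conventional default and may differ by platform):
 *
 *	nvlist_t *pools = zpool_find_import_cached(hdl,
 *	    "/etc/zfs/zpool.cache", NULL, 0);
 */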
1357
1358 static int
1359 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1360 {
1361         importargs_t *import = data;
1362         int found = 0;
1363
1364         if (import->poolname != NULL) {
1365                 char *pool_name;
1366
1367                 verify(nvlist_lookup_string(zhp->zpool_config,
1368                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1369                 if (strcmp(pool_name, import->poolname) == 0)
1370                         found = 1;
1371         } else {
1372                 uint64_t pool_guid;
1373
1374                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1375                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1376                 if (pool_guid == import->guid)
1377                         found = 1;
1378         }
1379
1380         zpool_close(zhp);
1381         return (found);
1382 }
1383
1384 nvlist_t *
1385 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1386 {
1387         verify(import->poolname == NULL || import->guid == 0);
1388
1389         if (import->unique)
1390                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1391
1392         if (import->cachefile != NULL)
1393                 return (zpool_find_import_cached(hdl, import->cachefile,
1394                     import->poolname, import->guid));
1395
1396         return (zpool_find_import_impl(hdl, import));
1397 }
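/*
 * Usage sketch (illustrative; "tank" is a hypothetical pool name):
 * searching for one specific pool through the importargs_t interface.
 *
 *	importargs_t args = { 0 };
 *	nvlist_t *pools;
 *
 *	args.poolname = "tank";
 *	pools = zpool_search_import(hdl, &args);
 */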
1398
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
        uint64_t tmp;
        nvlist_t **child;
        uint_t c, children;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
        if (tmp == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_guid(child[c], guid))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

typedef struct aux_cbdata {
        const char      *cb_type;
        uint64_t        cb_guid;
        zpool_handle_t  *cb_zhp;
} aux_cbdata_t;

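/*
 * zpool_iter() callback: scan the pool's cb_type (spare or l2cache) vdev
 * list for a device with a matching guid.  On a match, stash the still-open
 * handle in cb_zhp and halt iteration; otherwise close the handle and
 * continue.
 */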
static int
find_aux(zpool_handle_t *zhp, void *data)
{
        aux_cbdata_t *cbp = data;
        nvlist_t **list;
        uint_t i, count;
        uint64_t guid;
        nvlist_t *nvroot;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
            &list, &count) == 0) {
                for (i = 0; i < count; i++) {
                        verify(nvlist_lookup_uint64(list[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (guid == cbp->cb_guid) {
                                cbp->cb_zhp = zhp;
                                return (1);
                        }
                }
        }

        zpool_close(zhp);
        return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true along with the
 * state of the pool and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
        nvlist_t *config;
        char *name;
        boolean_t ret;
        uint64_t guid, vdev_guid;
        zpool_handle_t *zhp;
        nvlist_t *pool_config;
        uint64_t stateval, isspare;
        aux_cbdata_t cb = { 0 };
        boolean_t isactive;

        *inuse = B_FALSE;

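        /*
         * Read the vdev label.  No valid label is reported as a NULL
         * config rather than an error; a nonzero return is treated as
         * memory exhaustion.
         */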
        if (zpool_read_label(fd, &config) != 0) {
                (void) no_memory(hdl);
                return (-1);
        }

        if (config == NULL)
                return (0);

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) == 0);

        if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
        }

        switch (stateval) {
        case POOL_STATE_EXPORTED:
                /*
                 * A pool with an exported state may in fact be imported
                 * read-only, so check the in-core state to see if it's
                 * active and imported read-only.  If it is, set
                 * its state to active.
                 */
                if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
                    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
                        if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
                                stateval = POOL_STATE_ACTIVE;

                        /*
                         * The handle was needed only for the readonly
                         * property check; close it so it isn't leaked.
                         */
                        zpool_close(zhp);
                }

                ret = B_TRUE;
                break;

        case POOL_STATE_ACTIVE:
                /*
                 * For an active pool, we have to determine if it's really part
                 * of a currently active pool (in which case the pool will exist
                 * and the guid will be the same), or whether it's part of an
                 * active pool that was disconnected without being explicitly
                 * exported.
                 */
                if (pool_active(hdl, name, guid, &isactive) != 0) {
                        nvlist_free(config);
                        return (-1);
                }

                if (isactive) {
                        /*
                         * Because the device may have been removed while
                         * offlined, we only report it as active if the vdev is
                         * still present in the config.  Otherwise, pretend like
                         * it's not in use.
                         */
                        if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
                            (pool_config = zpool_get_config(zhp, NULL))
                            != NULL) {
                                nvlist_t *nvroot;

                                verify(nvlist_lookup_nvlist(pool_config,
                                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                                ret = find_guid(nvroot, vdev_guid);
                        } else {
                                ret = B_FALSE;
                        }

                        /*
                         * If this is an active spare within another pool, we
                         * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
                         */
                        if (ret && nvlist_lookup_uint64(config,
                            ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
                                stateval = POOL_STATE_SPARE;

                        if (zhp != NULL)
                                zpool_close(zhp);
                } else {
                        stateval = POOL_STATE_POTENTIALLY_ACTIVE;
                        ret = B_TRUE;
                }
                break;

        case POOL_STATE_SPARE:
                /*
                 * For a hot spare, it can be either definitively in use, or
                 * potentially active.  To determine if it's in use, we iterate
                 * over all pools in the system and search for one with a spare
                 * with a matching guid.
                 *
                 * Due to the shared nature of spares, we don't actually report
                 * the potentially active case as in use.  This means the user
                 * can freely create pools on the hot spares of exported pools,
                 * but to do otherwise makes the resulting code complicated, and
                 * we end up having to deal with this case anyway.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_SPARES;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        case POOL_STATE_L2CACHE:
                /*
                 * Check if any pool is currently using this l2cache device.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_L2CACHE;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        default:
                ret = B_FALSE;
        }

        if (ret) {
                if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
                        if (cb.cb_zhp)
                                zpool_close(cb.cb_zhp);
                        nvlist_free(config);
                        return (-1);
                }
                *state = (pool_state_t)stateval;
        }

        if (cb.cb_zhp)
                zpool_close(cb.cb_zhp);

        nvlist_free(config);
        *inuse = ret;
        return (0);
}
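
/*
 * Example usage (a sketch only; hdl and path are assumed to be an open
 * libzfs handle and a candidate device path): this is the kind of check a
 * consumer might make before reusing a disk:
 *
 *	int fd;
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if ((fd = open(path, O_RDONLY)) >= 0) {
 *		if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 &&
 *		    inuse) {
 *			(void) fprintf(stderr, "%s is part of pool '%s'\n",
 *			    path, name);
 *			free(name);
 *		}
 *		(void) close(fd);
 *	}
 */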