Illumos #1693: persistent 'comment' field for a zpool
lib/libzfs/libzfs_import.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>

#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

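/*
 * Read the devid for the device at 'path'.  Returns an allocated devid
 * string on success, or NULL if no devid could be obtained; the caller
 * is responsible for freeing the result with devid_str_free().
 */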
static char *
get_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
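         *
         * For example (hypothetical paths), if the stored path is
         * "/dev/dsk/c0t0d0s0" and this guid is now reachable as both
         * "/dev/dsk/c1t0d0s0" and "/dev/dsk/c1t0d0s1", the first name wins:
         * it matches seven trailing characters ("0t0d0s0") versus none for
         * the second, preserving the slice number across the controller
         * change.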
         */
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
                        return (-1);
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}

/*
 * Determine whether an active pool with the given name exists and matches
 * the given GUID; the result is returned through 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

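/*
 * Ask the kernel to try importing the given config (ZFS_IOC_POOL_TRYIMPORT)
 * and return the refreshed configuration it reports, growing the destination
 * nvlist buffer as needed.  Returns NULL on failure.
 */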
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        int c;

        for (c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname, *comment;
        uint64_t version, guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION, &version) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
                                if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name) != 0)
                                        goto nomem;

                                /*
                                 * COMMENT is optional; don't bail if it's not
                                 * there.  Instead, set it to NULL.
                                 */
                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) != 0)
                                        comment = NULL;
                                else if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_COMMENT, comment) != 0)
                                        goto nomem;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state) != 0)
                                        goto nomem;

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        if (nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid) != 0)
                                                goto nomem;
                                        verify(nvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            &hostname) == 0);
                                        if (nvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            hostname) != 0)
                                                goto nomem;
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0)
                                        goto nomem;
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
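 * The first half of the labels are stored at the front of the device and
 * the second half at the end, so for the back labels the offset is computed
 * from the device size (rounded down to a label-sized multiple).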
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}

#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
        blkid_cache cache;
        blkid_dev_iterate iter;
        blkid_dev dev;
        const char *devname;
        nvlist_t *config;
        int fd, err;

        err = blkid_get_cache(&cache, NULL);
        if (err != 0) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
                goto err_blkid1;
        }

        err = blkid_probe_all(cache);
        if (err != 0) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
                goto err_blkid2;
        }

        iter = blkid_dev_iterate_begin(cache);
        if (iter == NULL) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
                goto err_blkid2;
        }

        err = blkid_dev_set_search(iter, "TYPE", "zfs");
        if (err != 0) {
                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
                goto err_blkid3;
        }

        while (blkid_dev_next(iter, &dev) == 0) {
                devname = blkid_dev_devname(dev);
                if ((fd = open64(devname, O_RDONLY)) < 0)
                        continue;

                err = zpool_read_label(fd, &config);
                (void) close(fd);

                if (err != 0) {
                        (void) no_memory(hdl);
                        goto err_blkid3;
                }

                if (config != NULL) {
                        err = add_config(hdl, pools, devname, config);
                        if (err != 0)
                                goto err_blkid3;
                }
        }

err_blkid3:
        blkid_dev_iterate_end(iter);
err_blkid2:
        blkid_put_cache(cache);
err_blkid1:
        return (err);
}
#endif /* HAVE_LIBBLKID */

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (DISK_ROOT) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        struct stat64 statbuf;
        nvlist_t *ret = NULL, *config;
        static char *default_dir = DISK_ROOT;
        int fd;
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;

        verify(iarg->poolname == NULL || iarg->guid == 0);

        if (dirs == 0) {
#ifdef HAVE_LIBBLKID
                /* Use libblkid to scan all devices for their type */
                if (zpool_find_import_blkid(hdl, &pools) == 0)
                        goto skip_scanning;

                (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "blkid failure falling back "
                    "to manual probing"));
#endif /* HAVE_LIBBLKID */
                dirs = 1;
                dir = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                char *rdsk;
                int dfd;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == 0) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/rdsk/";
                else
                        rdsk = path;

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        /*
                         * Skip checking devices with well known prefixes:
                         * watchdog - A special close is required to avoid
                         *            triggering it and resetting the system.
                         * fuse     - Fuse control device.
                         * ppp      - Generic PPP driver.
                         * tty*     - Generic serial interface.
                         * vcs*     - Virtual console memory.
                         * parport* - Parallel port interface.
                         * lp*      - Printer interface.
                         * fd*      - Floppy interface.
                         * hpet     - High Precision Event Timer, crashes qemu
                         *            when accessed from a virtual machine.
                         * core     - Symlink to /proc/kcore, causes a crash
                         *            when accessed from Xen dom0.
                         */
                        if ((strncmp(name, "watchdog", 8) == 0) ||
                            (strncmp(name, "fuse", 4) == 0)     ||
                            (strncmp(name, "ppp", 3) == 0)      ||
                            (strncmp(name, "tty", 3) == 0)      ||
                            (strncmp(name, "vcs", 3) == 0)      ||
                            (strncmp(name, "parport", 7) == 0)  ||
                            (strncmp(name, "lp", 2) == 0)       ||
                            (strncmp(name, "fd", 2) == 0)       ||
                            (strncmp(name, "hpet", 4) == 0)     ||
                            (strncmp(name, "core", 4) == 0))
                                continue;

                        /*
                         * Ignore failed stats.  We only want regular
                         * files and block devices.
                         */
                        if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
                            (!S_ISREG(statbuf.st_mode) &&
                            !S_ISBLK(statbuf.st_mode)))
                                continue;

                        if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
                                continue;

                        if ((zpool_read_label(fd, &config)) != 0) {
                                (void) close(fd);
                                (void) no_memory(hdl);
                                goto error;
                        }

                        (void) close(fd);

                        if (config != NULL) {
                                boolean_t matched = B_TRUE;

                                if (iarg->poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(iarg->poolname, pname) == 0;
                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                        config = NULL;
                                        continue;
                                }
                                /* use the non-raw path for the config */
                                (void) strlcpy(end, name, pathleft);
                                if (add_config(hdl, &pools, path, config) != 0)
                                        goto error;
                        }
                }

                (void) closedir(dirp);
                dirp = NULL;
        }

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
        ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                if (ce->ce_config)
                                        nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                if (ne->ne_name)
                        free(ne->ne_name);
                free(ne);
        }

        if (dirp)
                (void) closedir(dirp);

        return (ret);
}

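/*
 * Convenience wrapper around zpool_find_import_impl() that searches the
 * given directory list (or the defaults when argc is 0).
 */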
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        importargs_t iarg = { 0 };

        iarg.paths = argc;
        iarg.path = argv;

        return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

        /*
         * Go through and get the current state of the pools and refresh their
         * state.
         */
        if (nvlist_alloc(&pools, 0, 0) != 0) {
                (void) no_memory(hdl);
                nvlist_free(raw);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
                verify(nvpair_value_nvlist(elem, &src) == 0);

                verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (poolname != NULL && strcmp(poolname, name) != 0)
                        continue;

                verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
                    &this_guid) == 0);
                if (guid != 0 && guid != this_guid)
                        continue;

                if (pool_active(hdl, name, this_guid, &active) != 0) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (active)
                        continue;

                if ((dst = refresh_config(hdl, src)) == NULL) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
                        (void) no_memory(hdl);
                        nvlist_free(dst);
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }
                nvlist_free(dst);
        }

        nvlist_free(raw);
        return (pools);
}

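/*
 * zpool_iter() callback: returns nonzero if the active pool matches the
 * name or GUID given in the import arguments.
 */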
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
        importargs_t *import = data;
        int found = 0;

        if (import->poolname != NULL) {
                char *pool_name;

                verify(nvlist_lookup_string(zhp->zpool_config,
                    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
                if (strcmp(pool_name, import->poolname) == 0)
                        found = 1;
        } else {
                uint64_t pool_guid;

                verify(nvlist_lookup_uint64(zhp->zpool_config,
                    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
                if (pool_guid == import->guid)
                        found = 1;
        }

        zpool_close(zhp);
        return (found);
}

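/*
 * Search for importable pools using the supplied import arguments: first
 * note whether a conflicting active pool already exists (when 'unique' is
 * set), then search either the cache file or the device directories.
 */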
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
        verify(import->poolname == NULL || import->guid == 0);

        if (import->unique)
                import->exists = zpool_iter(hdl, name_or_guid_exists, import);

        if (import->cachefile != NULL)
                return (zpool_find_import_cached(hdl, import->cachefile,
                    import->poolname, import->guid));

        return (zpool_find_import_impl(hdl, import));
}

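/*
 * Return B_TRUE if the given guid matches this vdev or any vdev beneath
 * it in the tree.
 */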
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
        uint64_t tmp;
        nvlist_t **child;
        uint_t c, children;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
        if (tmp == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_guid(child[c], guid))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

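/*
 * Callback data used when searching the aux device (spare or l2cache)
 * lists of the active pools for a particular vdev GUID.
 */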
typedef struct aux_cbdata {
        const char      *cb_type;
        uint64_t        cb_guid;
        zpool_handle_t  *cb_zhp;
} aux_cbdata_t;

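/*
 * zpool_iter() callback: scan this pool's cb_type list (spares or l2cache)
 * for cb_guid.  On a match the pool handle is kept open in cb_zhp and
 * iteration stops; otherwise the handle is closed.
 */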
1400 static int
1401 find_aux(zpool_handle_t *zhp, void *data)
1402 {
1403         aux_cbdata_t *cbp = data;
1404         nvlist_t **list;
1405         uint_t i, count;
1406         uint64_t guid;
1407         nvlist_t *nvroot;
1408
1409         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1410             &nvroot) == 0);
1411
1412         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1413             &list, &count) == 0) {
1414                 for (i = 0; i < count; i++) {
1415                         verify(nvlist_lookup_uint64(list[i],
1416                             ZPOOL_CONFIG_GUID, &guid) == 0);
1417                         if (guid == cbp->cb_guid) {
1418                                 cbp->cb_zhp = zhp;
1419                                 return (1);
1420                         }
1421                 }
1422         }
1423
1424         zpool_close(zhp);
1425         return (0);
1426 }

/*
 * Determines if the device at 'fd' is in use by a pool.  If so, B_TRUE is
 * returned through 'inuse' along with the state of the pool and the name
 * of the pool.  The name string is allocated and must be freed by the
 * caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
        nvlist_t *config;
        char *name;
        boolean_t ret;
        uint64_t guid, vdev_guid;
        zpool_handle_t *zhp;
        nvlist_t *pool_config;
        uint64_t stateval, isspare;
        aux_cbdata_t cb = { 0 };
        boolean_t isactive;

        *inuse = B_FALSE;

        if (zpool_read_label(fd, &config) != 0) {
                (void) no_memory(hdl);
                return (-1);
        }

        if (config == NULL)
                return (0);

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) == 0);

        if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
        }

        switch (stateval) {
        case POOL_STATE_EXPORTED:
                /*
                 * A pool with an exported state may in fact be imported
                 * read-only, so check the in-core state to see if it's
                 * active and imported read-only.  If it is, set
                 * its state to active.
                 */
                if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
                    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
                        if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
                                stateval = POOL_STATE_ACTIVE;
                        /*
                         * The handle was only needed to read the readonly
                         * prop; close it so it isn't leaked.
                         */
                        zpool_close(zhp);
                }

                ret = B_TRUE;
                break;

        case POOL_STATE_ACTIVE:
                /*
                 * For an active pool, we have to determine if it's really part
                 * of a currently active pool (in which case the pool will exist
                 * and the guid will be the same), or whether it's part of an
                 * active pool that was disconnected without being explicitly
                 * exported.
                 */
                if (pool_active(hdl, name, guid, &isactive) != 0) {
                        nvlist_free(config);
                        return (-1);
                }

                if (isactive) {
                        /*
                         * Because the device may have been removed while
                         * offlined, we only report it as active if the vdev is
                         * still present in the config.  Otherwise, pretend like
                         * it's not in use.
                         */
                        if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
                            (pool_config = zpool_get_config(zhp, NULL))
                            != NULL) {
                                nvlist_t *nvroot;

                                verify(nvlist_lookup_nvlist(pool_config,
                                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                                ret = find_guid(nvroot, vdev_guid);
                        } else {
                                ret = B_FALSE;
                        }

                        /*
                         * If this is an active spare within another pool, we
                         * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
                         */
                        if (ret && nvlist_lookup_uint64(config,
                            ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
                                stateval = POOL_STATE_SPARE;

                        if (zhp != NULL)
                                zpool_close(zhp);
                } else {
                        stateval = POOL_STATE_POTENTIALLY_ACTIVE;
                        ret = B_TRUE;
                }
                break;

        case POOL_STATE_SPARE:
                /*
                 * For a hot spare, it can be either definitively in use, or
                 * potentially active.  To determine if it's in use, we iterate
                 * over all pools in the system and search for one that has a
                 * spare with a matching guid.
                 *
                 * Due to the shared nature of spares, we don't actually report
                 * the potentially active case as in use.  This means the user
                 * can freely create pools on the hot spares of exported pools,
                 * but to do otherwise makes the resulting code complicated, and
                 * we end up having to deal with this case anyway.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_SPARES;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        case POOL_STATE_L2CACHE:
                /*
                 * Check if any pool is currently using this l2cache device.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_L2CACHE;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        default:
                ret = B_FALSE;
        }

        if (ret) {
                if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
                        if (cb.cb_zhp)
                                zpool_close(cb.cb_zhp);
                        nvlist_free(config);
                        return (-1);
                }
                *state = (pool_state_t)stateval;
        }

        if (cb.cb_zhp)
                zpool_close(cb.cb_zhp);

        nvlist_free(config);
        *inuse = ret;
        return (0);
}
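
/*
 * Illustrative sketch (not part of the original code): a typical caller
 * checks a candidate device before reusing it.  The device path and the
 * error handling are assumptions made for this example only:
 *
 *      pool_state_t state;
 *      char *name;
 *      boolean_t inuse;
 *      int fd;
 *
 *      if ((fd = open("/dev/sda1", O_RDONLY)) >= 0) {
 *              if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 &&
 *                  inuse) {
 *                      (void) printf("in use by pool '%s'\n", name);
 *                      free(name);
 *              }
 *              (void) close(fd);
 *      }
 */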