Skip /dev/hpet during 'zpool import'
lib/libzfs/libzfs_import.c
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24
25 /*
26  * Pool import support functions.
27  *
28  * To import a pool, we rely on reading the configuration information from the
29  * ZFS label of each device.  If we successfully read the label, then we
30  * organize the configuration information in the following hierarchy:
31  *
32  *      pool guid -> toplevel vdev guid -> label txg
33  *
34  * Duplicate entries matching this same tuple will be discarded.  Once we have
35  * examined every device, we pick the best label txg config for each toplevel
36  * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
37  * update any paths that have changed.  Finally, we attempt to import the pool
38  * using our derived config, and record the results.
39  */
39  */
40
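/*
 * An illustrative sketch (hypothetical guids and paths, not taken from any
 * real pool): labels read from /dev/sda1 and /dev/sdb1 might be gathered as
 *
 *      pool guid 0x1234
 *          -> toplevel vdev guid 0xaaaa -> txg 100 (label on /dev/sda1)
 *          -> toplevel vdev guid 0xbbbb -> txg  98 (label on /dev/sdb1)
 *
 * with the highest label txg winning for each toplevel vdev.
 */
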
41 #include <ctype.h>
42 #include <devid.h>
43 #include <dirent.h>
44 #include <errno.h>
45 #include <libintl.h>
46 #include <stddef.h>
47 #include <stdlib.h>
48 #include <string.h>
49 #include <sys/stat.h>
50 #include <unistd.h>
51 #include <fcntl.h>
52 #include <sys/vtoc.h>
53 #include <sys/dktp/fdisk.h>
54 #include <sys/efi_partition.h>
55
56 #include <sys/vdev_impl.h>
57 #ifdef HAVE_LIBBLKID
58 #include <blkid/blkid.h>
59 #endif
60
61 #include "libzfs.h"
62 #include "libzfs_impl.h"
63
64 /*
65  * Intermediate structures used to gather configuration information.
66  */
67 typedef struct config_entry {
68         uint64_t                ce_txg;
69         nvlist_t                *ce_config;
70         struct config_entry     *ce_next;
71 } config_entry_t;
72
73 typedef struct vdev_entry {
74         uint64_t                ve_guid;
75         config_entry_t          *ve_configs;
76         struct vdev_entry       *ve_next;
77 } vdev_entry_t;
78
79 typedef struct pool_entry {
80         uint64_t                pe_guid;
81         vdev_entry_t            *pe_vdevs;
82         struct pool_entry       *pe_next;
83 } pool_entry_t;
84
85 typedef struct name_entry {
86         char                    *ne_name;
87         uint64_t                ne_guid;
88         struct name_entry       *ne_next;
89 } name_entry_t;
90
91 typedef struct pool_list {
92         pool_entry_t            *pools;
93         name_entry_t            *names;
94 } pool_list_t;
95
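/*
 * Return an allocated devid string for the device at 'path', encoded with
 * devid_str_encode(), or NULL if no devid can be obtained.  The caller is
 * responsible for freeing the result with devid_str_free().
 */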
96 static char *
97 get_devid(const char *path)
98 {
99         int fd;
100         ddi_devid_t devid;
101         char *minor, *ret;
102
103         if ((fd = open(path, O_RDONLY)) < 0)
104                 return (NULL);
105
106         minor = NULL;
107         ret = NULL;
108         if (devid_get(fd, &devid) == 0) {
109                 if (devid_get_minor_name(fd, &minor) == 0)
110                         ret = devid_str_encode(devid, minor);
111                 if (minor != NULL)
112                         devid_str_free(minor);
113                 devid_free(devid);
114         }
115         (void) close(fd);
116
117         return (ret);
118 }
119
120
121 /*
122  * Go through and fix up any path and/or devid information for the given vdev
123  * configuration.
124  */
125 static int
126 fix_paths(nvlist_t *nv, name_entry_t *names)
127 {
128         nvlist_t **child;
129         uint_t c, children;
130         uint64_t guid;
131         name_entry_t *ne, *best;
132         char *path, *devid;
133         int matched;
134
135         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
136             &child, &children) == 0) {
137                 for (c = 0; c < children; c++)
138                         if (fix_paths(child[c], names) != 0)
139                                 return (-1);
140                 return (0);
141         }
142
143         /*
144          * This is a leaf (file or disk) vdev.  In either case, go through
145          * the name list and see if we find a matching guid.  If so, replace
146          * the path and see if we can calculate a new devid.
147          *
148          * There may be multiple names associated with a particular guid, in
149          * which case we have overlapping slices or multiple paths to the same
150          * disk.  If this is the case, then we want to pick the path that is
151          * the most similar to the original, where "most similar" is the number
152          * of matching characters starting from the end of the path.  This will
153          * preserve slice numbers even if the disks have been reorganized, and
154          * will also catch preferred disk names if multiple paths exist.
155          */
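        /*
         * For example (hypothetical Solaris-style paths): if the label's
         * path is /dev/dsk/c0t0d0s4 and the name list holds
         * /dev/dsk/c1t0d0s4 and /dev/dsk/c1t0d0s5, the first entry wins
         * with six matching trailing characters ("t0d0s4"), while the
         * second mismatches immediately on the slice digit.
         */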
156         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
157         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
158                 path = NULL;
159
160         matched = 0;
161         best = NULL;
162         for (ne = names; ne != NULL; ne = ne->ne_next) {
163                 if (ne->ne_guid == guid) {
164                         const char *src, *dst;
165                         int count;
166
167                         if (path == NULL) {
168                                 best = ne;
169                                 break;
170                         }
171
172                         src = ne->ne_name + strlen(ne->ne_name) - 1;
173                         dst = path + strlen(path) - 1;
174                         for (count = 0; src >= ne->ne_name && dst >= path;
175                             src--, dst--, count++)
176                                 if (*src != *dst)
177                                         break;
178
179                         /*
180                          * At this point, 'count' is the number of characters
181                          * matched from the end.
182                          */
183                         if (count > matched || best == NULL) {
184                                 best = ne;
185                                 matched = count;
186                         }
187                 }
188         }
189
190         if (best == NULL)
191                 return (0);
192
193         if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
194                 return (-1);
195
196         if ((devid = get_devid(best->ne_name)) == NULL) {
197                 (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
198         } else {
199                 if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
200                         return (-1);
201                 devid_str_free(devid);
202         }
203
204         return (0);
205 }
206
207 /*
208  * Add the given configuration to the list of known devices.
209  */
210 static int
211 add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
212     nvlist_t *config)
213 {
214         uint64_t pool_guid, vdev_guid, top_guid, txg, state;
215         pool_entry_t *pe;
216         vdev_entry_t *ve;
217         config_entry_t *ce;
218         name_entry_t *ne;
219
220         /*
221          * If this is a hot spare not currently in use or level 2 cache
222          * device, add it to the list of names to translate, but don't do
223          * anything else.
224          */
225         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
226             &state) == 0 &&
227             (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
228             nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
229                 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
230                         return (-1);
231
232                 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
233                         free(ne);
234                         return (-1);
235                 }
236                 ne->ne_guid = vdev_guid;
237                 ne->ne_next = pl->names;
238                 pl->names = ne;
239                 return (0);
240         }
241
242         /*
243          * If we have a valid config but cannot read any of these fields, then
244          * it means we have a half-initialized label.  In vdev_label_init()
245          * we write a label with txg == 0 so that we can identify the device
246          * in case the user refers to the same disk later on.  If we fail to
247          * create the pool, we'll be left with a label in this state
248          * which should not be considered part of a valid pool.
249          */
250         if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
251             &pool_guid) != 0 ||
252             nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
253             &vdev_guid) != 0 ||
254             nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
255             &top_guid) != 0 ||
256             nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
257             &txg) != 0 || txg == 0) {
258                 nvlist_free(config);
259                 return (0);
260         }
261
262         /*
263          * First, see if we know about this pool.  If not, then add it to the
264          * list of known pools.
265          */
266         for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
267                 if (pe->pe_guid == pool_guid)
268                         break;
269         }
270
271         if (pe == NULL) {
272                 if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
273                         nvlist_free(config);
274                         return (-1);
275                 }
276                 pe->pe_guid = pool_guid;
277                 pe->pe_next = pl->pools;
278                 pl->pools = pe;
279         }
280
281         /*
282  * Second, see if we know about this toplevel vdev.  Add it if it's
283  * missing.
284          */
285         for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
286                 if (ve->ve_guid == top_guid)
287                         break;
288         }
289
290         if (ve == NULL) {
291                 if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
292                         nvlist_free(config);
293                         return (-1);
294                 }
295                 ve->ve_guid = top_guid;
296                 ve->ve_next = pe->pe_vdevs;
297                 pe->pe_vdevs = ve;
298         }
299
300         /*
301          * Third, see if we have a config with a matching transaction group.  If
302          * so, then we do nothing.  Otherwise, add it to the list of known
303          * configs.
304          */
305         for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
306                 if (ce->ce_txg == txg)
307                         break;
308         }
309
310         if (ce == NULL) {
311                 if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
312                         nvlist_free(config);
313                         return (-1);
314                 }
315                 ce->ce_txg = txg;
316                 ce->ce_config = config;
317                 ce->ce_next = ve->ve_configs;
318                 ve->ve_configs = ce;
319         } else {
320                 nvlist_free(config);
321         }
322
323         /*
324          * At this point we've successfully added our config to the list of
325          * known configs.  The last thing to do is add the vdev guid -> path
326          * mappings so that we can fix up the configuration as necessary before
327          * doing the import.
328          */
329         if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
330                 return (-1);
331
332         if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
333                 free(ne);
334                 return (-1);
335         }
336
337         ne->ne_guid = vdev_guid;
338         ne->ne_next = pl->names;
339         pl->names = ne;
340
341         return (0);
342 }
343
344 /*
345  * Determine whether an active pool with the given name exists and, if so,
 * whether its GUID matches the given GUID.  The result is returned in
 * 'isactive'.
346  */
347 static int
348 pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
349     boolean_t *isactive)
350 {
351         zpool_handle_t *zhp;
352         uint64_t theguid;
353
354         if (zpool_open_silent(hdl, name, &zhp) != 0)
355                 return (-1);
356
357         if (zhp == NULL) {
358                 *isactive = B_FALSE;
359                 return (0);
360         }
361
362         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
363             &theguid) == 0);
364
365         zpool_close(zhp);
366
367         *isactive = (theguid == guid);
368         return (0);
369 }
370
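/*
 * Ask the kernel to dry-run an import of the given config
 * (ZFS_IOC_POOL_TRYIMPORT) and return the refreshed configuration it
 * reports, or NULL on failure.  The reply buffer starts at twice the
 * packed config size and is grown whenever the ioctl fails with ENOMEM.
 */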
371 static nvlist_t *
372 refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
373 {
374         nvlist_t *nvl;
375         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
376         int err;
377
378         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
379                 return (NULL);
380
381         if (zcmd_alloc_dst_nvlist(hdl, &zc,
382             zc.zc_nvlist_conf_size * 2) != 0) {
383                 zcmd_free_nvlists(&zc);
384                 return (NULL);
385         }
386
387         while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
388             &zc)) != 0 && errno == ENOMEM) {
389                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
390                         zcmd_free_nvlists(&zc);
391                         return (NULL);
392                 }
393         }
394
395         if (err) {
396                 zcmd_free_nvlists(&zc);
397                 return (NULL);
398         }
399
400         if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
401                 zcmd_free_nvlists(&zc);
402                 return (NULL);
403         }
404
405         zcmd_free_nvlists(&zc);
406         return (nvl);
407 }
408
409 /*
410  * Determine if the vdev id is a hole in the namespace.
411  */
412 boolean_t
413 vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
414 {
415         uint_t c;
416
417         for (c = 0; c < holes; c++) {
418
419                 /* Top-level is a hole */
420                 if (hole_array[c] == id)
421                         return (B_TRUE);
422         }
423         return (B_FALSE);
424 }
425
426 /*
427  * Convert our list of pools into the definitive set of configurations.  We
428  * start by picking the best config for each toplevel vdev.  Once that's done,
429  * we assemble the toplevel vdevs into a full config for the pool.  We make a
430  * pass to fix up any incorrect paths, and then add it to the main list to
431  * return to the user.
432  */
433 static nvlist_t *
434 get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
435 {
436         pool_entry_t *pe;
437         vdev_entry_t *ve;
438         config_entry_t *ce;
439         nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
440         nvlist_t **spares, **l2cache;
441         uint_t i, nspares, nl2cache;
442         boolean_t config_seen;
443         uint64_t best_txg;
444         char *name, *hostname;
445         uint64_t version, guid;
446         uint_t children = 0;
447         nvlist_t **child = NULL;
448         uint_t holes;
449         uint64_t *hole_array, max_id;
450         uint_t c;
451         boolean_t isactive;
452         uint64_t hostid;
453         nvlist_t *nvl;
454         boolean_t found_one = B_FALSE;
455         boolean_t valid_top_config = B_FALSE;
456
457         if (nvlist_alloc(&ret, 0, 0) != 0)
458                 goto nomem;
459
460         for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
461                 uint64_t id, max_txg = 0;
462
463                 if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
464                         goto nomem;
465                 config_seen = B_FALSE;
466
467                 /*
468                  * Iterate over all toplevel vdevs.  Grab the pool configuration
469                  * from the first one we find, and then go through the rest and
470                  * add them as necessary to the 'vdevs' member of the config.
471                  */
472                 for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
473
474                         /*
475                          * Determine the best configuration for this vdev by
476                          * selecting the config with the latest transaction
477                          * group.
478                          */
479                         best_txg = 0;
480                         for (ce = ve->ve_configs; ce != NULL;
481                             ce = ce->ce_next) {
482
483                                 if (ce->ce_txg > best_txg) {
484                                         tmp = ce->ce_config;
485                                         best_txg = ce->ce_txg;
486                                 }
487                         }
488
489                         /*
490                          * We rely on the fact that the max txg for the
491                          * pool will contain the most up-to-date information
492                          * about the valid top-levels in the vdev namespace.
493                          */
494                         if (best_txg > max_txg) {
495                                 (void) nvlist_remove(config,
496                                     ZPOOL_CONFIG_VDEV_CHILDREN,
497                                     DATA_TYPE_UINT64);
498                                 (void) nvlist_remove(config,
499                                     ZPOOL_CONFIG_HOLE_ARRAY,
500                                     DATA_TYPE_UINT64_ARRAY);
501
502                                 max_txg = best_txg;
503                                 hole_array = NULL;
504                                 holes = 0;
505                                 max_id = 0;
506                                 valid_top_config = B_FALSE;
507
508                                 if (nvlist_lookup_uint64(tmp,
509                                     ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
510                                         verify(nvlist_add_uint64(config,
511                                             ZPOOL_CONFIG_VDEV_CHILDREN,
512                                             max_id) == 0);
513                                         valid_top_config = B_TRUE;
514                                 }
515
516                                 if (nvlist_lookup_uint64_array(tmp,
517                                     ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
518                                     &holes) == 0) {
519                                         verify(nvlist_add_uint64_array(config,
520                                             ZPOOL_CONFIG_HOLE_ARRAY,
521                                             hole_array, holes) == 0);
522                                 }
523                         }
524
525                         if (!config_seen) {
526                                 /*
527                                  * Copy the relevant pieces of data to the pool
528                                  * configuration:
529                                  *
530                                  *      version
531                                  *      pool guid
532                                  *      name
533                                  *      pool state
534                                  *      hostid (if available)
535                                  *      hostname (if available)
536                                  */
537                                 uint64_t state;
538
539                                 verify(nvlist_lookup_uint64(tmp,
540                                     ZPOOL_CONFIG_VERSION, &version) == 0);
541                                 if (nvlist_add_uint64(config,
542                                     ZPOOL_CONFIG_VERSION, version) != 0)
543                                         goto nomem;
544                                 verify(nvlist_lookup_uint64(tmp,
545                                     ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
546                                 if (nvlist_add_uint64(config,
547                                     ZPOOL_CONFIG_POOL_GUID, guid) != 0)
548                                         goto nomem;
549                                 verify(nvlist_lookup_string(tmp,
550                                     ZPOOL_CONFIG_POOL_NAME, &name) == 0);
551                                 if (nvlist_add_string(config,
552                                     ZPOOL_CONFIG_POOL_NAME, name) != 0)
553                                         goto nomem;
554                                 verify(nvlist_lookup_uint64(tmp,
555                                     ZPOOL_CONFIG_POOL_STATE, &state) == 0);
556                                 if (nvlist_add_uint64(config,
557                                     ZPOOL_CONFIG_POOL_STATE, state) != 0)
558                                         goto nomem;
559                                 hostid = 0;
560                                 if (nvlist_lookup_uint64(tmp,
561                                     ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
562                                         if (nvlist_add_uint64(config,
563                                             ZPOOL_CONFIG_HOSTID, hostid) != 0)
564                                                 goto nomem;
565                                         verify(nvlist_lookup_string(tmp,
566                                             ZPOOL_CONFIG_HOSTNAME,
567                                             &hostname) == 0);
568                                         if (nvlist_add_string(config,
569                                             ZPOOL_CONFIG_HOSTNAME,
570                                             hostname) != 0)
571                                                 goto nomem;
572                                 }
573
574                                 config_seen = B_TRUE;
575                         }
576
577                         /*
578                          * Add this top-level vdev to the child array.
579                          */
580                         verify(nvlist_lookup_nvlist(tmp,
581                             ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
582                         verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
583                             &id) == 0);
584
585                         if (id >= children) {
586                                 nvlist_t **newchild;
587
588                                 newchild = zfs_alloc(hdl, (id + 1) *
589                                     sizeof (nvlist_t *));
590                                 if (newchild == NULL)
591                                         goto nomem;
592
593                                 for (c = 0; c < children; c++)
594                                         newchild[c] = child[c];
595
596                                 free(child);
597                                 child = newchild;
598                                 children = id + 1;
599                         }
600                         if (nvlist_dup(nvtop, &child[id], 0) != 0)
601                                 goto nomem;
602
603                 }
604
605                 /*
606                  * If we have information about all the top-levels then
607                  * clean up the nvlist which we've constructed. This
608                  * means removing any extraneous devices that are
609                  * beyond the valid range or adding devices to the end
610                  * of our array which appear to be missing.
611                  */
612                 if (valid_top_config) {
613                         if (max_id < children) {
614                                 for (c = max_id; c < children; c++)
615                                         nvlist_free(child[c]);
616                                 children = max_id;
617                         } else if (max_id > children) {
618                                 nvlist_t **newchild;
619
620                                 newchild = zfs_alloc(hdl, (max_id) *
621                                     sizeof (nvlist_t *));
622                                 if (newchild == NULL)
623                                         goto nomem;
624
625                                 for (c = 0; c < children; c++)
626                                         newchild[c] = child[c];
627
628                                 free(child);
629                                 child = newchild;
630                                 children = max_id;
631                         }
632                 }
633
634                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
635                     &guid) == 0);
636
637                 /*
638                  * The vdev namespace may contain holes as a result of
639                  * device removal. We must add them back into the vdev
640                  * tree before we process any missing devices.
641                  */
642                 if (holes > 0) {
643                         ASSERT(valid_top_config);
644
645                         for (c = 0; c < children; c++) {
646                                 nvlist_t *holey;
647
648                                 if (child[c] != NULL ||
649                                     !vdev_is_hole(hole_array, holes, c))
650                                         continue;
651
652                                 if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
653                                     0) != 0)
654                                         goto nomem;
655
656                                 /*
657                                  * Holes in the namespace are treated as
658                                  * "hole" top-level vdevs and have a
659                                  * special flag set on them.
660                                  */
661                                 if (nvlist_add_string(holey,
662                                     ZPOOL_CONFIG_TYPE,
663                                     VDEV_TYPE_HOLE) != 0 ||
664                                     nvlist_add_uint64(holey,
665                                     ZPOOL_CONFIG_ID, c) != 0 ||
666                                     nvlist_add_uint64(holey,
667                                     ZPOOL_CONFIG_GUID, 0ULL) != 0)
668                                         goto nomem;
669                                 child[c] = holey;
670                         }
671                 }
672
673                 /*
674                  * Look for any missing top-level vdevs.  If this is the case,
675                  * create a faked up 'missing' vdev as a placeholder.  We cannot
676                  * simply compress the child array, because the kernel performs
677                  * certain checks to make sure the vdev IDs match their location
678                  * in the configuration.
679                  */
680                 for (c = 0; c < children; c++) {
681                         if (child[c] == NULL) {
682                                 nvlist_t *missing;
683                                 if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
684                                     0) != 0)
685                                         goto nomem;
686                                 if (nvlist_add_string(missing,
687                                     ZPOOL_CONFIG_TYPE,
688                                     VDEV_TYPE_MISSING) != 0 ||
689                                     nvlist_add_uint64(missing,
690                                     ZPOOL_CONFIG_ID, c) != 0 ||
691                                     nvlist_add_uint64(missing,
692                                     ZPOOL_CONFIG_GUID, 0ULL) != 0) {
693                                         nvlist_free(missing);
694                                         goto nomem;
695                                 }
696                                 child[c] = missing;
697                         }
698                 }
699
700                 /*
701                  * Put all of this pool's top-level vdevs into a root vdev.
702                  */
703                 if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
704                         goto nomem;
705                 if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
706                     VDEV_TYPE_ROOT) != 0 ||
707                     nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
708                     nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
709                     nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
710                     child, children) != 0) {
711                         nvlist_free(nvroot);
712                         goto nomem;
713                 }
714
715                 for (c = 0; c < children; c++)
716                         nvlist_free(child[c]);
717                 free(child);
718                 children = 0;
719                 child = NULL;
720
721                 /*
722                  * Go through and fix up any paths and/or devids based on our
723                  * known list of vdev GUID -> path mappings.
724                  */
725                 if (fix_paths(nvroot, pl->names) != 0) {
726                         nvlist_free(nvroot);
727                         goto nomem;
728                 }
729
730                 /*
731                  * Add the root vdev to this pool's configuration.
732                  */
733                 if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
734                     nvroot) != 0) {
735                         nvlist_free(nvroot);
736                         goto nomem;
737                 }
738                 nvlist_free(nvroot);
739
740                 /*
741                  * zdb uses this path to report on active pools that were
742                  * imported or created using -R.
743                  */
744                 if (active_ok)
745                         goto add_pool;
746
747                 /*
748                  * Determine if this pool is currently active, in which case we
749                  * can't actually import it.
750                  */
751                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
752                     &name) == 0);
753                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
754                     &guid) == 0);
755
756                 if (pool_active(hdl, name, guid, &isactive) != 0)
757                         goto error;
758
759                 if (isactive) {
760                         nvlist_free(config);
761                         config = NULL;
762                         continue;
763                 }
764
765                 if ((nvl = refresh_config(hdl, config)) == NULL) {
766                         nvlist_free(config);
767                         config = NULL;
768                         continue;
769                 }
770
771                 nvlist_free(config);
772                 config = nvl;
773
774                 /*
775                  * Go through and update the paths for spares, now that we have
776                  * them.
777                  */
778                 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
779                     &nvroot) == 0);
780                 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
781                     &spares, &nspares) == 0) {
782                         for (i = 0; i < nspares; i++) {
783                                 if (fix_paths(spares[i], pl->names) != 0)
784                                         goto nomem;
785                         }
786                 }
787
788                 /*
789                  * Update the paths for l2cache devices.
790                  */
791                 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
792                     &l2cache, &nl2cache) == 0) {
793                         for (i = 0; i < nl2cache; i++) {
794                                 if (fix_paths(l2cache[i], pl->names) != 0)
795                                         goto nomem;
796                         }
797                 }
798
799                 /*
800                  * Restore the original information read from the actual label.
801                  */
802                 (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
803                     DATA_TYPE_UINT64);
804                 (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
805                     DATA_TYPE_STRING);
806                 if (hostid != 0) {
807                         verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
808                             hostid) == 0);
809                         verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
810                             hostname) == 0);
811                 }
812
813 add_pool:
814                 /*
815                  * Add this pool to the list of configs.
816                  */
817                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
818                     &name) == 0);
819                 if (nvlist_add_nvlist(ret, name, config) != 0)
820                         goto nomem;
821
822                 found_one = B_TRUE;
823                 nvlist_free(config);
824                 config = NULL;
825         }
826
827         if (!found_one) {
828                 nvlist_free(ret);
829                 ret = NULL;
830         }
831
832         return (ret);
833
834 nomem:
835         (void) no_memory(hdl);
836 error:
837         nvlist_free(config);
838         nvlist_free(ret);
839         for (c = 0; c < children; c++)
840                 nvlist_free(child[c]);
841         free(child);
842
843         return (NULL);
844 }
845
846 /*
847  * Return the offset of the given label.
848  */
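/*
 * A short worked example (assuming the standard 256K vdev_label_t and
 * VDEV_LABELS == 4): for an aligned device size 'size' the four labels
 * are placed at
 *
 *      l=0: 0                  l=1: 256K
 *      l=2: size - 512K        l=3: size - 256K
 *
 * i.e. two labels at the front of the device and two at the back.
 */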
849 static uint64_t
850 label_offset(uint64_t size, int l)
851 {
852         ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
853         return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
854             0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
855 }
856
857 /*
858  * Given a file descriptor, read the label information and return an nvlist
859  * describing the configuration, if there is one.
860  */
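/*
 * A minimal caller sketch (mirroring the use in zpool_find_import_blkid()
 * below; 'devname' is illustrative):
 *
 *      if ((fd = open64(devname, O_RDONLY)) >= 0) {
 *              if (zpool_read_label(fd, &config) != 0)
 *                      ... out of memory ...
 *              else if (config != NULL)
 *                      ... 'config' holds the label nvlist ...
 *              (void) close(fd);
 *      }
 */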
861 int
862 zpool_read_label(int fd, nvlist_t **config)
863 {
864         struct stat64 statbuf;
865         int l;
866         vdev_label_t *label;
867         uint64_t state, txg, size;
868
869         *config = NULL;
870
871         if (fstat64(fd, &statbuf) == -1)
872                 return (0);
873         size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
874
875         if ((label = malloc(sizeof (vdev_label_t))) == NULL)
876                 return (-1);
877
878         for (l = 0; l < VDEV_LABELS; l++) {
879                 if (pread64(fd, label, sizeof (vdev_label_t),
880                     label_offset(size, l)) != sizeof (vdev_label_t))
881                         continue;
882
883                 if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
884                     sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
885                         continue;
886
887                 if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
888                     &state) != 0 || state > POOL_STATE_L2CACHE) {
889                         nvlist_free(*config);
890                         continue;
891                 }
892
893                 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
894                     (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
895                     &txg) != 0 || txg == 0)) {
896                         nvlist_free(*config);
897                         continue;
898                 }
899
900                 free(label);
901                 return (0);
902         }
903
904         free(label);
905         *config = NULL;
906         return (0);
907 }
908
909 #ifdef HAVE_LIBBLKID
910 /*
911  * Use libblkid to quickly search for zfs devices
912  */
913 static int
914 zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
915 {
916         blkid_cache cache;
917         blkid_dev_iterate iter;
918         blkid_dev dev;
919         const char *devname;
920         nvlist_t *config;
921         int fd, err;
922
923         err = blkid_get_cache(&cache, NULL);
924         if (err != 0) {
925                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
926                     dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
927                 goto err_blkid1;
928         }
929
930         err = blkid_probe_all(cache);
931         if (err != 0) {
932                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
933                     dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
934                 goto err_blkid2;
935         }
936
937         iter = blkid_dev_iterate_begin(cache);
938         if (iter == NULL) {
939                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
940                     dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
                /* Set err so the caller falls back to manual probing. */
                err = -1;
941                 goto err_blkid2;
942         }
943
944         err = blkid_dev_set_search(iter, "TYPE", "zfs");
945         if (err != 0) {
946                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
947                     dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
948                 goto err_blkid3;
949         }
950
951         while (blkid_dev_next(iter, &dev) == 0) {
952                 devname = blkid_dev_devname(dev);
953                 if ((fd = open64(devname, O_RDONLY)) < 0)
954                         continue;
955
956                 err = zpool_read_label(fd, &config);
957                 (void) close(fd);
958
959                 if (err != 0) {
960                         (void) no_memory(hdl);
961                         goto err_blkid3;
962                 }
963
964                 if (config != NULL) {
965                         err = add_config(hdl, pools, devname, config);
966                         if (err != 0)
967                                 goto err_blkid3;
968                 }
969         }
970
971 err_blkid3:
972         blkid_dev_iterate_end(iter);
973 err_blkid2:
974         blkid_put_cache(cache);
975 err_blkid1:
976         return (err);
977 }
978 #endif /* HAVE_LIBBLKID */
979
980 /*
981  * Given a list of directories to search, find all pools stored on disk.  This
982  * includes partial pools which are not available to import.  If no args are
983  * given (argc is 0), then the default directory (DISK_ROOT) is searched.
984  * poolname or guid (but not both) are provided by the caller when trying
985  * to import a specific pool.
986  */
987 static nvlist_t *
988 zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
989 {
990         int i, dirs = iarg->paths;
991         DIR *dirp = NULL;
992         struct dirent64 *dp;
993         char path[MAXPATHLEN];
994         char *end, **dir = iarg->path;
995         size_t pathleft;
996         struct stat64 statbuf;
997         nvlist_t *ret = NULL, *config;
998         static char *default_dir = DISK_ROOT;
999         int fd;
1000         pool_list_t pools = { 0 };
1001         pool_entry_t *pe, *penext;
1002         vdev_entry_t *ve, *venext;
1003         config_entry_t *ce, *cenext;
1004         name_entry_t *ne, *nenext;
1005
1006         verify(iarg->poolname == NULL || iarg->guid == 0);
1007
1008         if (dirs == 0) {
1009 #ifdef HAVE_LIBBLKID
1010                 /* Use libblkid to scan all devices for their type */
1011                 if (zpool_find_import_blkid(hdl, &pools) == 0)
1012                         goto skip_scanning;
1013
1014                 (void) zfs_error_fmt(hdl, EZFS_BADCACHE,
1015                     dgettext(TEXT_DOMAIN, "blkid failure falling back "
1016                     "to manual probing"));
1017 #endif /* HAVE_LIBBLKID */
1018                 dirs = 1;
1019                 dir = &default_dir;
1020         }
1021
1022         /*
1023          * Go through and read the label configuration information from every
1024          * possible device, organizing the information according to pool GUID
1025          * and toplevel GUID.
1026          */
1027         for (i = 0; i < dirs; i++) {
1028                 char *rdsk;
1029                 int dfd;
1030
1031                 /* use realpath to normalize the path */
1032                 if (realpath(dir[i], path) == NULL) {
1033                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1034                             dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
1035                         goto error;
1036                 }
1037                 end = &path[strlen(path)];
1038                 *end++ = '/';
1039                 *end = 0;
1040                 pathleft = &path[sizeof (path)] - end;
1041
1042                 /*
1043                  * Using raw devices instead of block devices when we're
1044                  * reading the labels skips a bunch of slow operations during
1045                  * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1046                  */
1047                 if (strcmp(path, "/dev/dsk/") == 0)
1048                         rdsk = "/dev/rdsk/";
1049                 else
1050                         rdsk = path;
1051
1052                 if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1053                     (dirp = fdopendir(dfd)) == NULL) {
1054                         zfs_error_aux(hdl, "%s", strerror(errno));
1055                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1056                             dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1057                             rdsk);
1058                         goto error;
1059                 }
1060
1061                 /*
1062                  * This is not MT-safe, but we have no MT consumers of libzfs
1063                  */
1064                 while ((dp = readdir64(dirp)) != NULL) {
1065                         const char *name = dp->d_name;
1066                         if (name[0] == '.' &&
1067                             (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1068                                 continue;
1069
1070                         /*
1071                          * Skip checking devices with well known prefixes:
1072                          * watchdog - A special close is required to avoid
1073                          *            triggering it and resetting the system.
1074                          * fuse     - Fuse control device.
1075                          * ppp      - Generic PPP driver.
1076                          * tty*     - Generic serial interface.
1077                          * vcs*     - Virtual console memory.
1078                          * parport* - Parallel port interface.
1079                          * lp*      - Printer interface.
1080                          * fd*      - Floppy interface.
                         * hpet     - High Precision Event Timer.
1081                          */
1082                         if ((strncmp(name, "watchdog", 8) == 0) ||
1083                             (strncmp(name, "fuse", 4) == 0)     ||
1084                             (strncmp(name, "ppp", 3) == 0)      ||
1085                             (strncmp(name, "tty", 3) == 0)      ||
1086                             (strncmp(name, "vcs", 3) == 0)      ||
1087                             (strncmp(name, "parport", 7) == 0)  ||
1088                             (strncmp(name, "lp", 2) == 0)       ||
1089                             (strncmp(name, "fd", 2) == 0)       ||
1090                             (strncmp(name, "hpet", 4) == 0))
1091                                 continue;
1092
1093                         if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
1094                                 continue;
1095
1096                         /*
1097                          * Ignore failed stats.  We only want regular
1098                          * files and block devs.
1099                          */
1100                         if (fstat64(fd, &statbuf) != 0 ||
1101                             (!S_ISREG(statbuf.st_mode) &&
1102                             !S_ISBLK(statbuf.st_mode))) {
1103                                 (void) close(fd);
1104                                 continue;
1105                         }
1106
1107                         if ((zpool_read_label(fd, &config)) != 0) {
1108                                 (void) close(fd);
1109                                 (void) no_memory(hdl);
1110                                 goto error;
1111                         }
1112
1113                         (void) close(fd);
1114
1115                         if (config != NULL) {
1116                                 boolean_t matched = B_TRUE;
1117
1118                                 if (iarg->poolname != NULL) {
1119                                         char *pname;
1120
1121                                         matched = nvlist_lookup_string(config,
1122                                             ZPOOL_CONFIG_POOL_NAME,
1123                                             &pname) == 0 &&
1124                                             strcmp(iarg->poolname, pname) == 0;
1125                                 } else if (iarg->guid != 0) {
1126                                         uint64_t this_guid;
1127
1128                                         matched = nvlist_lookup_uint64(config,
1129                                             ZPOOL_CONFIG_POOL_GUID,
1130                                             &this_guid) == 0 &&
1131                                             iarg->guid == this_guid;
1132                                 }
1133                                 if (!matched) {
1134                                         nvlist_free(config);
1135                                         config = NULL;
1136                                         continue;
1137                                 }
1138                                 /* use the non-raw path for the config */
1139                                 (void) strlcpy(end, name, pathleft);
1140                                 if (add_config(hdl, &pools, path, config) != 0)
1141                                         goto error;
1142                         }
1143                 }
1144
1145                 (void) closedir(dirp);
1146                 dirp = NULL;
1147         }
1148
1149 #ifdef HAVE_LIBBLKID
1150 skip_scanning:
1151 #endif
1152         ret = get_configs(hdl, &pools, iarg->can_be_active);
1153
1154 error:
1155         for (pe = pools.pools; pe != NULL; pe = penext) {
1156                 penext = pe->pe_next;
1157                 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1158                         venext = ve->ve_next;
1159                         for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1160                                 cenext = ce->ce_next;
1161                                 if (ce->ce_config)
1162                                         nvlist_free(ce->ce_config);
1163                                 free(ce);
1164                         }
1165                         free(ve);
1166                 }
1167                 free(pe);
1168         }
1169
1170         for (ne = pools.names; ne != NULL; ne = nenext) {
1171                 nenext = ne->ne_next;
1172                 if (ne->ne_name)
1173                         free(ne->ne_name);
1174                 free(ne);
1175         }
1176
1177         if (dirp)
1178                 (void) closedir(dirp);
1179
1180         return (ret);
1181 }
1182
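/*
 * Convenience wrapper around zpool_find_import_impl(): search the 'argc'
 * directories named in 'argv' (for example, a hypothetical call with
 * argc == 1 and argv[0] == "/dev/disk/by-id") for devices bearing valid
 * ZFS labels.
 */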
1183 nvlist_t *
1184 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1185 {
1186         importargs_t iarg = { 0 };
1187
1188         iarg.paths = argc;
1189         iarg.path = argv;
1190
1191         return (zpool_find_import_impl(hdl, &iarg));
1192 }
1193
1194 /*
1195  * Given a cache file, return the contents as a list of importable pools.
1196  * poolname or guid (but not both) are provided by the caller when trying
1197  * to import a specific pool.
1198  */
1199 nvlist_t *
1200 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1201     char *poolname, uint64_t guid)
1202 {
1203         char *buf;
1204         int fd;
1205         struct stat64 statbuf;
1206         nvlist_t *raw, *src, *dst;
1207         nvlist_t *pools;
1208         nvpair_t *elem;
1209         char *name;
1210         uint64_t this_guid;
1211         boolean_t active;
1212
1213         verify(poolname == NULL || guid == 0);
1214
1215         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1216                 zfs_error_aux(hdl, "%s", strerror(errno));
1217                 (void) zfs_error(hdl, EZFS_BADCACHE,
1218                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1219                 return (NULL);
1220         }
1221
1222         if (fstat64(fd, &statbuf) != 0) {
1223                 zfs_error_aux(hdl, "%s", strerror(errno));
1224                 (void) close(fd);
1225                 (void) zfs_error(hdl, EZFS_BADCACHE,
1226                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1227                 return (NULL);
1228         }
1229
1230         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1231                 (void) close(fd);
1232                 return (NULL);
1233         }
1234
1235         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1236                 (void) close(fd);
1237                 free(buf);
1238                 (void) zfs_error(hdl, EZFS_BADCACHE,
1239                     dgettext(TEXT_DOMAIN,
1240                     "failed to read cache file contents"));
1241                 return (NULL);
1242         }
1243
1244         (void) close(fd);
1245
1246         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1247                 free(buf);
1248                 (void) zfs_error(hdl, EZFS_BADCACHE,
1249                     dgettext(TEXT_DOMAIN,
1250                     "invalid or corrupt cache file contents"));
1251                 return (NULL);
1252         }
1253
1254         free(buf);
1255
1256         /*
1257          * Go through and get the current state of the pools and refresh their
1258          * state.
1259          */
1260         if (nvlist_alloc(&pools, 0, 0) != 0) {
1261                 (void) no_memory(hdl);
1262                 nvlist_free(raw);
1263                 return (NULL);
1264         }
1265
1266         elem = NULL;
1267         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1268                 verify(nvpair_value_nvlist(elem, &src) == 0);
1269
1270                 verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
1271                     &name) == 0);
1272                 if (poolname != NULL && strcmp(poolname, name) != 0)
1273                         continue;
1274
1275                 verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
1276                     &this_guid) == 0);
1277                 if (guid != 0 && guid != this_guid)
1278                         continue;
1283
1284                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1285                         nvlist_free(raw);
1286                         nvlist_free(pools);
1287                         return (NULL);
1288                 }
1289
1290                 if (active)
1291                         continue;
1292
1293                 if ((dst = refresh_config(hdl, src)) == NULL) {
1294                         nvlist_free(raw);
1295                         nvlist_free(pools);
1296                         return (NULL);
1297                 }
1298
1299                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1300                         (void) no_memory(hdl);
1301                         nvlist_free(dst);
1302                         nvlist_free(raw);
1303                         nvlist_free(pools);
1304                         return (NULL);
1305                 }
1306                 nvlist_free(dst);
1307         }
1308
1309         nvlist_free(raw);
1310         return (pools);
1311 }
1312
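/*
 * zpool_iter() callback used by zpool_search_import(): reports whether an
 * active pool already uses the requested pool name or guid.
 */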
1313 static int
1314 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1315 {
1316         importargs_t *import = data;
1317         int found = 0;
1318
1319         if (import->poolname != NULL) {
1320                 char *pool_name;
1321
1322                 verify(nvlist_lookup_string(zhp->zpool_config,
1323                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1324                 if (strcmp(pool_name, import->poolname) == 0)
1325                         found = 1;
1326         } else {
1327                 uint64_t pool_guid;
1328
1329                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1330                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1331                 if (pool_guid == import->guid)
1332                         found = 1;
1333         }
1334
1335         zpool_close(zhp);
1336         return (found);
1337 }
1338
1339 nvlist_t *
1340 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1341 {
1342         verify(import->poolname == NULL || import->guid == 0);
1343
1344         if (import->unique)
1345                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1346
1347         if (import->cachefile != NULL)
1348                 return (zpool_find_import_cached(hdl, import->cachefile,
1349                     import->poolname, import->guid));
1350
1351         return (zpool_find_import_impl(hdl, import));
1352 }
1353
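/*
 * Recursively walk the vdev tree rooted at 'nv' and return B_TRUE if 'nv'
 * itself, or any vdev below it, carries the given guid.
 */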
1354 boolean_t
1355 find_guid(nvlist_t *nv, uint64_t guid)
1356 {
1357         uint64_t tmp;
1358         nvlist_t **child;
1359         uint_t c, children;
1360
1361         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1362         if (tmp == guid)
1363                 return (B_TRUE);
1364
1365         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1366             &child, &children) == 0) {
1367                 for (c = 0; c < children; c++)
1368                         if (find_guid(child[c], guid))
1369                                 return (B_TRUE);
1370         }
1371
1372         return (B_FALSE);
1373 }
1374
1375 typedef struct aux_cbdata {
1376         const char      *cb_type;
1377         uint64_t        cb_guid;
1378         zpool_handle_t  *cb_zhp;
1379 } aux_cbdata_t;
1380
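/*
 * zpool_iter() callback: return 1 (keeping the handle in cb_zhp) if this
 * pool has an aux device of type cb_type (ZPOOL_CONFIG_SPARES or
 * ZPOOL_CONFIG_L2CACHE) whose guid matches cb_guid; otherwise close the
 * handle and continue iterating.
 */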
1381 static int
1382 find_aux(zpool_handle_t *zhp, void *data)
1383 {
1384         aux_cbdata_t *cbp = data;
1385         nvlist_t **list;
1386         uint_t i, count;
1387         uint64_t guid;
1388         nvlist_t *nvroot;
1389
1390         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1391             &nvroot) == 0);
1392
1393         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1394             &list, &count) == 0) {
1395                 for (i = 0; i < count; i++) {
1396                         verify(nvlist_lookup_uint64(list[i],
1397                             ZPOOL_CONFIG_GUID, &guid) == 0);
1398                         if (guid == cbp->cb_guid) {
1399                                 cbp->cb_zhp = zhp;
1400                                 return (1);
1401                         }
1402                 }
1403         }
1404
1405         zpool_close(zhp);
1406         return (0);
1407 }
1408
1409 /*
1410  * Determines if the pool is in use.  If so, it sets '*inuse', and returns
1411  * the state of the pool as well as the name of the pool.  The name string
1412  * is allocated and must be freed by the caller.
1413  */
1414 int
1415 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1416     boolean_t *inuse)
1417 {
1418         nvlist_t *config;
1419         char *name;
1420         boolean_t ret;
1421         uint64_t guid, vdev_guid;
1422         zpool_handle_t *zhp;
1423         nvlist_t *pool_config;
1424         uint64_t stateval, isspare;
1425         aux_cbdata_t cb = { 0 };
1426         boolean_t isactive;
1427
1428         *inuse = B_FALSE;
1429
1430         if (zpool_read_label(fd, &config) != 0) {
1431                 (void) no_memory(hdl);
1432                 return (-1);
1433         }
1434
1435         if (config == NULL)
1436                 return (0);
1437
1438         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1439             &stateval) == 0);
1440         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1441             &vdev_guid) == 0);
1442
1443         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1444                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1445                     &name) == 0);
1446                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1447                     &guid) == 0);
1448         }
1449
1450         switch (stateval) {
1451         case POOL_STATE_EXPORTED:
1452                 /*
1453                  * A pool with an exported state may in fact be imported
1454                  * read-only, so check the in-core state to see if it's
1455                  * active and imported read-only.  If it is, set
1456                  * its state to active.
1457                  */
1458                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1459                     (zhp = zpool_open_canfail(hdl, name)) != NULL) {
                        if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
                                stateval = POOL_STATE_ACTIVE;
                        /*
                         * The handle was only needed for the readonly
                         * property check; close it to avoid a leak.
                         */
                        zpool_close(zhp);
                }
1462
1463                 ret = B_TRUE;
1464                 break;
1465
1466         case POOL_STATE_ACTIVE:
1467                 /*
1468                  * For an active pool, we have to determine if it's really part
1469                  * of a currently active pool (in which case the pool will exist
1470                  * and the guid will be the same), or whether it's part of an
1471                  * active pool that was disconnected without being explicitly
1472                  * exported.
1473                  */
1474                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1475                         nvlist_free(config);
1476                         return (-1);
1477                 }
1478
1479                 if (isactive) {
1480                         /*
1481                          * Because the device may have been removed while
1482                          * offlined, we only report it as active if the vdev is
1483                          * still present in the config.  Otherwise, pretend like
1484                          * it's not in use.
1485                          */
1486                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1487                             (pool_config = zpool_get_config(zhp, NULL))
1488                             != NULL) {
1489                                 nvlist_t *nvroot;
1490
1491                                 verify(nvlist_lookup_nvlist(pool_config,
1492                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1493                                 ret = find_guid(nvroot, vdev_guid);
1494                         } else {
1495                                 ret = B_FALSE;
1496                         }
1497
1498                         /*
1499                          * If this is an active spare within another pool, we
1500                          * treat it like an unused hot spare.  This allows the
1501                          * user to create a pool with a hot spare that currently
1502                          * in use within another pool.  Since we return B_TRUE,
1503                          * libdiskmgt will continue to prevent generic consumers
1504                          * from using the device.
1505                          */
1506                         if (ret && nvlist_lookup_uint64(config,
1507                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1508                                 stateval = POOL_STATE_SPARE;
1509
1510                         if (zhp != NULL)
1511                                 zpool_close(zhp);
1512                 } else {
1513                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1514                         ret = B_TRUE;
1515                 }
1516                 break;
1517
1518         case POOL_STATE_SPARE:
1519                 /*
1520                  * For a hot spare, it can be either definitively in use, or
1521                  * potentially active.  To determine if it's in use, we iterate
1522                  * over all pools in the system and search for one with a spare
1523                  * with a matching guid.
1524                  *
1525                  * Due to the shared nature of spares, we don't actually report
1526                  * the potentially active case as in use.  This means the user
1527                  * can freely create pools on the hot spares of exported pools,
1528                  * but to do otherwise makes the resulting code complicated, and
1529                  * we end up having to deal with this case anyway.
1530                  */
1531                 cb.cb_zhp = NULL;
1532                 cb.cb_guid = vdev_guid;
1533                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1534                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1535                         name = (char *)zpool_get_name(cb.cb_zhp);
1536                         ret = B_TRUE;
1537                 } else {
1538                         ret = B_FALSE;
1539                 }
1540                 break;
1541
1542         case POOL_STATE_L2CACHE:
1543
1544                 /*
1545                  * Check if any pool is currently using this l2cache device.
1546                  */
1547                 cb.cb_zhp = NULL;
1548                 cb.cb_guid = vdev_guid;
1549                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1550                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1551                         name = (char *)zpool_get_name(cb.cb_zhp);
1552                         ret = B_TRUE;
1553                 } else {
1554                         ret = B_FALSE;
1555                 }
1556                 break;
1557
1558         default:
1559                 ret = B_FALSE;
1560         }
1561
1562
1563         if (ret) {
1564                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1565                         if (cb.cb_zhp)
1566                                 zpool_close(cb.cb_zhp);
1567                         nvlist_free(config);
1568                         return (-1);
1569                 }
1570                 *state = (pool_state_t)stateval;
1571         }
1572
1573         if (cb.cb_zhp)
1574                 zpool_close(cb.cb_zhp);
1575
1576         nvlist_free(config);
1577         *inuse = ret;
1578         return (0);
1579 }