/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
        int create:1;   /* Validate property on creation */
        int import:1;   /* Validate property on import */
} prop_flags_t;
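
/*
 * Example (illustrative): callers describe the validation context with
 * designated initializers, as zpool_create() and zpool_import_props()
 * do later in this file:
 *
 *      prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 */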

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        libzfs_handle_t *hdl = zhp->zpool_hdl;

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
                return (-1);

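        /*
         * The kernel fills in zc_nvlist_dst.  If the preallocated
         * buffer is too small the ioctl() fails with ENOMEM, in which
         * case the buffer is expanded and the request retried.
         */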
        while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
                if (errno == ENOMEM) {
                        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                                zcmd_free_nvlists(&zc);
                                return (-1);
                        }
                } else {
                        zcmd_free_nvlists(&zc);
                        return (-1);
                }
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
                zcmd_free_nvlists(&zc);
                return (-1);
        }

        zcmd_free_nvlists(&zc);

        return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
        nvlist_t *old_props;

        old_props = zhp->zpool_props;

        if (zpool_get_all_props(zhp) != 0)
                return (-1);

        nvlist_free(old_props);
        return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
        nvlist_t *nv, *nvl;
        uint64_t ival;
        char *value;
        zprop_source_t source;

        nvl = zhp->zpool_props;
        if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
                source = ival;
                verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
        } else {
                source = ZPROP_SRC_DEFAULT;
                if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
                        value = "-";
        }

        if (src)
                *src = source;

        return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
        nvlist_t *nv, *nvl;
        uint64_t value;
        zprop_source_t source;

        if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
                /*
                 * zpool_get_all_props() has most likely failed because
                 * the pool is faulted, but if all we need is the top level
                 * vdev's guid then get it from the zhp config nvlist.
                 */
                if ((prop == ZPOOL_PROP_GUID) &&
                    (nvlist_lookup_nvlist(zhp->zpool_config,
                    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
                    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
                    == 0)) {
                        return (value);
                }
                return (zpool_prop_default_numeric(prop));
        }

        nvl = zhp->zpool_props;
        if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
                source = value;
                verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
        } else {
                source = ZPROP_SRC_DEFAULT;
                value = zpool_prop_default_numeric(prop);
        }

        if (src)
                *src = source;

        return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
        switch (state) {
        default:
                break;
        case VDEV_STATE_CLOSED:
        case VDEV_STATE_OFFLINE:
                return (gettext("OFFLINE"));
        case VDEV_STATE_REMOVED:
                return (gettext("REMOVED"));
        case VDEV_STATE_CANT_OPEN:
                if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
                        return (gettext("FAULTED"));
                else if (aux == VDEV_AUX_SPLIT_POOL)
                        return (gettext("SPLIT"));
                else
                        return (gettext("UNAVAIL"));
        case VDEV_STATE_FAULTED:
                return (gettext("FAULTED"));
        case VDEV_STATE_DEGRADED:
                return (gettext("DEGRADED"));
        case VDEV_STATE_HEALTHY:
                return (gettext("ONLINE"));
        }

        return (gettext("UNKNOWN"));
}
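
/*
 * Example (illustrative): callers typically feed the root vdev's state
 * through this function to obtain the string printed by 'zpool status';
 * for instance:
 *
 *      zpool_state_to_name(VDEV_STATE_HEALTHY, VDEV_AUX_NONE)
 *
 * returns "ONLINE".  VDEV_AUX_NONE here is the "no extra information"
 * aux value from sys/fs/zfs.h.
 */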

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
        uint64_t intval;
        const char *strval;
        zprop_source_t src = ZPROP_SRC_NONE;
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
                switch (prop) {
                case ZPOOL_PROP_NAME:
                        (void) strlcpy(buf, zpool_get_name(zhp), len);
                        break;

                case ZPOOL_PROP_HEALTH:
                        (void) strlcpy(buf, "FAULTED", len);
                        break;

                case ZPOOL_PROP_GUID:
                        intval = zpool_get_prop_int(zhp, prop, &src);
                        (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
                        break;

                case ZPOOL_PROP_ALTROOT:
                case ZPOOL_PROP_CACHEFILE:
                case ZPOOL_PROP_COMMENT:
                        if (zhp->zpool_props != NULL ||
                            zpool_get_all_props(zhp) == 0) {
                                (void) strlcpy(buf,
                                    zpool_get_prop_string(zhp, prop, &src),
                                    len);
                                if (srctype != NULL)
                                        *srctype = src;
                                return (0);
                        }
                        /* FALLTHROUGH */
                default:
                        (void) strlcpy(buf, "-", len);
                        break;
                }

                if (srctype != NULL)
                        *srctype = src;
                return (0);
        }

        if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
            prop != ZPOOL_PROP_NAME)
                return (-1);

        switch (zpool_prop_get_type(prop)) {
        case PROP_TYPE_STRING:
                (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
                    len);
                break;

        case PROP_TYPE_NUMBER:
                intval = zpool_get_prop_int(zhp, prop, &src);

                switch (prop) {
                case ZPOOL_PROP_SIZE:
                case ZPOOL_PROP_ALLOCATED:
                case ZPOOL_PROP_FREE:
                case ZPOOL_PROP_ASHIFT:
                        (void) zfs_nicenum(intval, buf, len);
                        break;

                case ZPOOL_PROP_CAPACITY:
                        (void) snprintf(buf, len, "%llu%%",
                            (u_longlong_t)intval);
                        break;

                case ZPOOL_PROP_DEDUPRATIO:
                        (void) snprintf(buf, len, "%llu.%02llux",
                            (u_longlong_t)(intval / 100),
                            (u_longlong_t)(intval % 100));
                        break;

                case ZPOOL_PROP_HEALTH:
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                        verify(nvlist_lookup_uint64_array(nvroot,
                            ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
                            == 0);

                        (void) strlcpy(buf, zpool_state_to_name(intval,
                            vs->vs_aux), len);
                        break;
                default:
                        (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
                }
                break;

        case PROP_TYPE_INDEX:
                intval = zpool_get_prop_int(zhp, prop, &src);
                if (zpool_prop_index_to_string(prop, intval, &strval)
                    != 0)
                        return (-1);
                (void) strlcpy(buf, strval, len);
                break;

        default:
                abort();
        }

        if (srctype)
                *srctype = src;

        return (0);
}
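
/*
 * Example (illustrative sketch): fetching the health string of an open
 * pool into a stack buffer; ZPOOL_MAXPROPLEN (from libzfs.h) is assumed
 * to be large enough for any printable property value.
 *
 *      char buf[ZPOOL_MAXPROPLEN];
 *      zprop_source_t src;
 *
 *      if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *          sizeof (buf), &src) == 0)
 *              (void) printf("health: %s\n", buf);
 */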

/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assumes that bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
        int len = strlen(pool);

        if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
                return (B_FALSE);

        if (strncmp(pool, bootfs, len) == 0 &&
            (bootfs[len] == '/' || bootfs[len] == '\0'))
                return (B_TRUE);

        return (B_FALSE);
}
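
/*
 * For example, with pool "tank" the values "tank" and "tank/boot/root"
 * are accepted, while "tankx" and "other/root" are rejected: the pool
 * component must match exactly and be followed by '/' or the end of
 * the string.
 */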

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
        nvlist_t **child;
        uint_t c, children;

        if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return (read_efi_label(config, NULL) >= 0);

        for (c = 0; c < children; c++) {
                if (pool_uses_efi(child[c]))
                        return (B_TRUE);
        }
        return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
        char bootfs[ZPOOL_MAXNAMELEN];

        return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
            sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
            sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
        nvpair_t *elem;
        nvlist_t *retprops;
        zpool_prop_t prop;
        char *strval;
        uint64_t intval;
        char *slash, *check;
        struct stat64 statbuf;
        zpool_handle_t *zhp;
        nvlist_t *nvroot;

        if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
                (void) no_memory(hdl);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                const char *propname = nvpair_name(elem);

                /*
                 * Make sure this property is valid and applies to this type.
                 */
                if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "invalid property '%s'"), propname);
                        (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                        goto error;
                }

                if (zpool_prop_readonly(prop)) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                            "is readonly"), propname);
                        (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
                        goto error;
                }

                if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
                    &strval, &intval, errbuf) != 0)
                        goto error;

                /*
                 * Perform additional checking for specific properties.
                 */
                switch (prop) {
                default:
                        break;
                case ZPOOL_PROP_VERSION:
                        if (intval < version || intval > SPA_VERSION) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' number %llu is invalid."),
                                    propname, (u_longlong_t)intval);
                                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_ASHIFT:
                        if (!flags.create) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set at "
                                    "creation time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (intval != 0 && (intval < 9 || intval > 13)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' number %llu is invalid."),
                                    propname, (u_longlong_t)intval);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_BOOTFS:
                        if (flags.create || flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' cannot be set at creation "
                                    "or import time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (version < SPA_VERSION_BOOTFS) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "pool must be upgraded to support "
                                    "'%s' property"), propname);
                                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                                goto error;
                        }

                        /*
                         * The bootfs property value has to be a dataset name,
                         * and the dataset has to be in the pool whose
                         * property is being set.
                         */
                        if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                            strval)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                                    "is an invalid name"), strval);
                                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
                                goto error;
                        }

                        if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "could not open pool '%s'"), poolname);
                                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
                                goto error;
                        }
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
                        /*
                         * bootfs property cannot be set on a disk which has
                         * been EFI labeled.
                         */
                        if (pool_uses_efi(nvroot)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' not supported on "
                                    "EFI labeled devices"), propname);
                                (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
                                zpool_close(zhp);
                                goto error;
                        }
#endif
                        zpool_close(zhp);
                        break;

                case ZPOOL_PROP_ALTROOT:
                        if (!flags.create && !flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set during pool "
                                    "creation or import"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (strval[0] != '/') {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "bad alternate root '%s'"), strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        if (strval[0] == '\0')
                                break;

                        if (strcmp(strval, "none") == 0)
                                break;

                        if (strval[0] != '/') {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' must be empty, an "
                                    "absolute path, or 'none'"), propname);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        slash = strrchr(strval, '/');

                        if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                            strcmp(slash, "/..") == 0) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' is not a valid file"), strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        *slash = '\0';

                        if (strval[0] != '\0' &&
                            (stat64(strval, &statbuf) != 0 ||
                            !S_ISDIR(statbuf.st_mode))) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' is not a valid directory"),
                                    strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        *slash = '/';
                        break;

                case ZPOOL_PROP_COMMENT:
                        for (check = strval; *check != '\0'; check++) {
                                if (!isprint(*check)) {
                                        zfs_error_aux(hdl,
                                            dgettext(TEXT_DOMAIN,
                                            "comment may only have printable "
                                            "characters"));
                                        (void) zfs_error(hdl, EZFS_BADPROP,
                                            errbuf);
                                        goto error;
                                }
                        }
                        if (strlen(strval) > ZPROP_MAX_COMMENT) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "comment must not exceed %d characters"),
                                    ZPROP_MAX_COMMENT);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;
                case ZPOOL_PROP_READONLY:
                        if (!flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set at "
                                    "import time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;
                }
        }

        return (retprops);
error:
        nvlist_free(retprops);
        return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int ret = -1;
        char errbuf[1024];
        nvlist_t *nvl = NULL;
        nvlist_t *realprops;
        uint64_t version;
        prop_flags_t flags = { 0 };

        (void) snprintf(errbuf, sizeof (errbuf),
            dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
            zhp->zpool_name);

        if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
                return (no_memory(zhp->zpool_hdl));

        if (nvlist_add_string(nvl, propname, propval) != 0) {
                nvlist_free(nvl);
                return (no_memory(zhp->zpool_hdl));
        }

        version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
        if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
            zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
                nvlist_free(nvl);
                return (-1);
        }

        nvlist_free(nvl);
        nvl = realprops;

        /*
         * Execute the corresponding ioctl() to set this property.
         */
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
                nvlist_free(nvl);
                return (-1);
        }

        ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

        zcmd_free_nvlists(&zc);
        nvlist_free(nvl);

        if (ret)
                (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
        else
                (void) zpool_props_refresh(zhp);

        return (ret);
}
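
/*
 * Example (illustrative sketch): setting a single property on an open
 * pool handle.  The name/value strings are validated and parsed by
 * zpool_valid_proplist() before the ioctl() is issued.
 *
 *      if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *              (void) fprintf(stderr, "cannot set comment\n");
 */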

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        zprop_list_t *entry;
        char buf[ZFS_MAXPROPLEN];

        if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
                return (-1);

        for (entry = *plp; entry != NULL; entry = entry->pl_next) {

                if (entry->pl_fixed)
                        continue;

                if (entry->pl_prop != ZPROP_INVAL &&
                    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
                    NULL) == 0) {
                        if (strlen(buf) > entry->pl_width)
                                entry->pl_width = strlen(buf);
                }
        }

        return (0);
}


/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, while other vendors prefer
 * a 1m alignment.  It is best to play it safe and ensure a 1m
 * alignment given 512B blocks.  When the block size is larger by a
 * power of 2 we will still be 1m aligned.  Some devices are sensitive
 * to the partition ending alignment as well.
 */
#define NEW_START_BLOCK         2048
#define PARTITION_END_ALIGNMENT 2048
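
/*
 * With 512B sectors, NEW_START_BLOCK works out to exactly the 1m
 * alignment described above: 2048 sectors * 512B = 1,048,576B = 1m.
 * A device with a larger power-of-2 block size (e.g. 4096B) remains
 * aligned, since 2048 * 4096B is a whole multiple of 1m.
 */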

/*
 * Validate the given pool name, optionally recording an extended error
 * message via 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
        namecheck_err_t why;
        char what;
        int ret;

        ret = pool_namecheck(pool, &why, &what);

        /*
         * The rules for reserved pool names were extended at a later point.
         * But we need to support users with existing pools that may now be
         * invalid.  So we only check for this expanded set of names during a
         * create (or import), and only in userland.
         */
        if (ret == 0 && !isopen &&
            (strncmp(pool, "mirror", 6) == 0 ||
            strncmp(pool, "raidz", 5) == 0 ||
            strncmp(pool, "spare", 5) == 0 ||
            strcmp(pool, "log") == 0)) {
                if (hdl != NULL)
                        zfs_error_aux(hdl,
                            dgettext(TEXT_DOMAIN, "name is reserved"));
                return (B_FALSE);
        }


        if (ret != 0) {
                if (hdl != NULL) {
                        switch (why) {
                        case NAME_ERR_TOOLONG:
                                zfs_error_aux(hdl,
                                    dgettext(TEXT_DOMAIN, "name is too long"));
                                break;

                        case NAME_ERR_INVALCHAR:
                                zfs_error_aux(hdl,
                                    dgettext(TEXT_DOMAIN, "invalid character "
                                    "'%c' in pool name"), what);
                                break;

                        case NAME_ERR_NOLETTER:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "name must begin with a letter"));
                                break;

                        case NAME_ERR_RESERVED:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "name is reserved"));
                                break;

                        case NAME_ERR_DISKLIKE:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "pool name is reserved"));
                                break;

                        case NAME_ERR_LEADING_SLASH:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "leading slash in name"));
                                break;

                        case NAME_ERR_EMPTY_COMPONENT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "empty component in name"));
                                break;

                        case NAME_ERR_TRAILING_SLASH:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "trailing slash in name"));
                                break;

                        case NAME_ERR_MULTIPLE_AT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "multiple '@' delimiters in name"));
                                break;
                        case NAME_ERR_NO_AT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "permission set is missing '@'"));
                                break;
                        }
                }
                return (B_FALSE);
        }

        return (B_TRUE);
}
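
/*
 * For example (illustrative): "tank" and "rpool2" are accepted, while
 * "mirror0", "raidz1", "spare1" and "log" are rejected at create or
 * import time because they would be ambiguous with vdev specifications
 * on the 'zpool' command line.
 */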

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
        zpool_handle_t *zhp;
        boolean_t missing;

        /*
         * Make sure the pool name is valid.
         */
        if (!zpool_name_valid(hdl, B_TRUE, pool)) {
                (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                    pool);
                return (NULL);
        }

        if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
                return (NULL);

        zhp->zpool_hdl = hdl;
        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if (zpool_refresh_stats(zhp, &missing) != 0) {
                zpool_close(zhp);
                return (NULL);
        }

        if (missing) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
                (void) zfs_error_fmt(hdl, EZFS_NOENT,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
        zpool_handle_t *zhp;
        boolean_t missing;

        if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
                return (-1);

        zhp->zpool_hdl = hdl;
        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if (zpool_refresh_stats(zhp, &missing) != 0) {
                zpool_close(zhp);
                return (-1);
        }

        if (missing) {
                zpool_close(zhp);
                *ret = NULL;
                return (0);
        }

        *ret = zhp;
        return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
                return (NULL);

        if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
                (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
        if (zhp->zpool_config)
                nvlist_free(zhp->zpool_config);
        if (zhp->zpool_old_config)
                nvlist_free(zhp->zpool_old_config);
        if (zhp->zpool_props)
                nvlist_free(zhp->zpool_props);
        free(zhp);
}
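
/*
 * Example (illustrative sketch of the handle life cycle, where 'hdl'
 * is a libzfs_handle_t obtained from libzfs_init()): every successful
 * zpool_open() must be balanced by a zpool_close().  Closing only
 * frees the cached nvlists; it does not change pool state.
 *
 *      zpool_handle_t *zhp;
 *
 *      if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *              (void) printf("%s\n", zpool_get_name(zhp));
 *              zpool_close(zhp);
 *      }
 */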

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
        return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
        return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        nvlist_t *zc_fsprops = NULL;
        nvlist_t *zc_props = NULL;
        char msg[1024];
        char *altroot;
        int ret = -1;

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot create '%s'"), pool);

        if (!zpool_name_valid(hdl, B_FALSE, pool))
                return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);

        if (props) {
                prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

                if ((zc_props = zpool_valid_proplist(hdl, pool, props,
                    SPA_VERSION_1, flags, msg)) == NULL) {
                        goto create_failed;
                }
        }

        if (fsprops) {
                uint64_t zoned;
                char *zonestr;

                zoned = ((nvlist_lookup_string(fsprops,
                    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
                    strcmp(zonestr, "on") == 0);

                if ((zc_fsprops = zfs_valid_proplist(hdl,
                    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
                        goto create_failed;
                }
                if (!zc_props &&
                    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
                        goto create_failed;
                }
                if (nvlist_add_nvlist(zc_props,
                    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
                        goto create_failed;
                }
        }

        if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
                goto create_failed;

        (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

        if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

                zcmd_free_nvlists(&zc);
                nvlist_free(zc_props);
                nvlist_free(zc_fsprops);

                switch (errno) {
                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.  This can also happen if the device is
                         * part of an active md or lvm device.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more vdevs refer to the same device, or one of\n"
                            "the devices is part of an active md or lvm device"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "one or more devices is less than the "
                                    "minimum size (%s)"), buf);
                        }
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case ENOSPC:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is out of space"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case ENOTBLK:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "cache device must be a disk or disk slice"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                default:
                        return (zpool_standard_error(hdl, errno, msg));
                }
        }

        /*
         * If this is an alternate root pool, then we automatically set the
         * mountpoint of the root dataset to be '/'.
         */
        if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
            &altroot) == 0) {
                zfs_handle_t *zhp;

                verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
                verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
                    "/") == 0);

                zfs_close(zhp);
        }

create_failed:
        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);
        return (ret);
}
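
/*
 * Example (illustrative sketch, error handling and extra leaf-vdev
 * fields omitted): the nvroot passed to zpool_create() is a root vdev
 * nvlist whose children describe the leaves.  A minimal single-disk
 * pool could be assembled roughly like this, using the ZPOOL_CONFIG_*
 * and VDEV_TYPE_* names from sys/fs/zfs.h; real callers such as
 * 'zpool create' set additional fields on each leaf.
 *
 *      nvlist_t *root, *disk;
 *
 *      verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *      verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *          VDEV_TYPE_DISK) == 0);
 *      verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *          "/dev/sdb") == 0);
 *
 *      verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *      verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *          VDEV_TYPE_ROOT) == 0);
 *      verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *          &disk, 1) == 0);
 *
 *      (void) zpool_create(hdl, "tank", root, NULL, NULL);
 */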

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        zfs_handle_t *zfp = NULL;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];

        if (zhp->zpool_state == POOL_STATE_ACTIVE &&
            (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot destroy '%s'"), zhp->zpool_name);

                if (errno == EROFS) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is read only"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                } else {
                        (void) zpool_standard_error(hdl, errno, msg);
                }

                if (zfp)
                        zfs_close(zfp);
                return (-1);
        }

        if (zfp) {
                remove_mountpoint(zfp);
                zfs_close(zfp);
        }

        return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int ret;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];
        nvlist_t **spares, **l2cache;
        uint_t nspares, nl2cache;

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot add to '%s'"), zhp->zpool_name);

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_SPARES &&
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
                    "upgraded to add hot spares"));
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }

        if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
                uint64_t s;

                for (s = 0; s < nspares; s++) {
                        char *path;

                        if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
                            &path) == 0 && pool_uses_efi(spares[s])) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device '%s' contains an EFI label and "
                                    "cannot be used on root pools."),
                                    zpool_vdev_name(hdl, NULL, spares[s],
                                    B_FALSE));
                                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
                        }
                }
        }

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_L2CACHE &&
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
            &l2cache, &nl2cache) == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
                    "upgraded to add cache devices"));
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }

        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
                switch (errno) {
                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more vdevs refer to the same device"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device is less than the minimum "
                                    "size (%s)"), buf);
                        }
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                case ENOTSUP:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "pool must be upgraded to add these vdevs"));
                        (void) zfs_error(hdl, EZFS_BADVERSION, msg);
                        break;

                case EDOM:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "root pool can not have multiple vdevs"
                            " or separate logs"));
                        (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
                        break;

                case ENOTBLK:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "cache device must be a disk or disk slice"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                default:
                        (void) zpool_standard_error(hdl, errno, msg);
                }

                ret = -1;
        } else {
                ret = 0;
        }

        zcmd_free_nvlists(&zc);

        return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot export '%s'"), zhp->zpool_name);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_cookie = force;
        zc.zc_guid = hardforce;

        if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
                switch (errno) {
                case EXDEV:
                        zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                            "use '-f' to override the following errors:\n"
                            "'%s' has an active shared spare which could be"
                            " used by other pools once '%s' is exported."),
                            zhp->zpool_name, zhp->zpool_name);
                        return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
                            msg));
                default:
                        return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
                            msg));
                }
        }

        return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
        return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
        return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
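
/*
 * Example (illustrative): the difference between the two wrappers is
 * only in the flags carried to the kernel (force in zc_cookie,
 * hardforce in zc_guid), as zpool_export_common() shows above:
 *
 *      (void) zpool_export(zhp, B_FALSE);      ordinary export
 *      (void) zpool_export_force(zhp);         force + hardforce
 */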

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
        nvlist_t *nv = NULL;
        uint64_t rewindto;
        int64_t loss = -1;
        struct tm t;
        char timestr[128];

        if (!hdl->libzfs_printerr || config == NULL)
                return;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
                return;

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
                return;
        (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

        if (localtime_r((time_t *)&rewindto, &t) != NULL &&
            strftime(timestr, 128, "%c", &t) != 0) {
                if (dryrun) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "Would be able to return %s "
                            "to its state as of %s.\n"),
                            name, timestr);
                } else {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "Pool %s returned to its state as of %s.\n"),
                            name, timestr);
                }
                if (loss > 120) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "%s approximately %lld "),
                            dryrun ? "Would discard" : "Discarded",
                            ((longlong_t)loss + 30) / 60);
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "minutes of transactions.\n"));
                } else if (loss > 0) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "%s approximately %lld "),
                            dryrun ? "Would discard" : "Discarded",
                            (longlong_t)loss);
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "seconds of transactions.\n"));
                }
        }
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
        nvlist_t *nv = NULL;
        int64_t loss = -1;
        uint64_t edata = UINT64_MAX;
        uint64_t rewindto;
        struct tm t;
        char timestr[128];

        if (!hdl->libzfs_printerr)
                return;

        if (reason >= 0)
                (void) printf(dgettext(TEXT_DOMAIN, "action: "));
        else
                (void) printf(dgettext(TEXT_DOMAIN, "\t"));

        /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
            nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
                goto no_info;

        (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
            &edata);

        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery is possible, but will result in some data loss.\n"));

        if (localtime_r((time_t *)&rewindto, &t) != NULL &&
            strftime(timestr, 128, "%c", &t) != 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "\tReturning the pool to its state as of %s\n"
                    "\tshould correct the problem.  "),
                    timestr);
        } else {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "\tReverting the pool to an earlier state "
                    "should correct the problem.\n\t"));
        }

        if (loss > 120) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "Approximately %lld minutes of data\n"
                    "\tmust be discarded, irreversibly.  "),
                    ((longlong_t)loss + 30) / 60);
        } else if (loss > 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "Approximately %lld seconds of data\n"
                    "\tmust be discarded, irreversibly.  "),
                    (longlong_t)loss);
        }
        if (edata != 0 && edata != UINT64_MAX) {
                if (edata == 1) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "After rewind, at least\n"
                            "\tone persistent user-data error will remain.  "));
                } else {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "After rewind, several\n"
                            "\tpersistent user-data errors will remain.  "));
                }
        }
        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
            reason >= 0 ? "clear" : "import", name);

        (void) printf(dgettext(TEXT_DOMAIN,
            "A scrub of the pool\n"
            "\tis strongly recommended after recovery.\n"));
        return;

no_info:
        (void) printf(dgettext(TEXT_DOMAIN,
            "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface and should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
1412 int
1413 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1414     char *altroot)
1415 {
1416         nvlist_t *props = NULL;
1417         int ret;
1418
1419         if (altroot != NULL) {
1420                 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1421                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1422                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1423                             newname));
1424                 }
1425
1426                 if (nvlist_add_string(props,
1427                     zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1428                     nvlist_add_string(props,
1429                     zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1430                         nvlist_free(props);
1431                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1432                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1433                             newname));
1434                 }
1435         }
1436
1437         ret = zpool_import_props(hdl, config, newname, props,
1438             ZFS_IMPORT_NORMAL);
1439         if (props)
1440                 nvlist_free(props);
1441         return (ret);
1442 }
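
/*
 * Illustrative sketch (not part of the contracted interface above): a
 * minimal caller of zpool_import().  The "/mnt" altroot is a hypothetical
 * value; 'config' would normally come from zpool_find_import().
 */
static int
example_import_with_altroot(libzfs_handle_t *hdl, nvlist_t *config)
{
        char altroot[] = "/mnt";        /* hypothetical alternate root */

        /* A NULL 'newname' keeps the pool name recorded in the config. */
        return (zpool_import(hdl, config, NULL, altroot));
}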
1443
1444 static void
1445 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1446     int indent)
1447 {
1448         nvlist_t **child;
1449         uint_t c, children;
1450         char *vname;
1451         uint64_t is_log = 0;
1452
1453         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1454             &is_log);
1455
1456         if (name != NULL)
1457                 (void) printf("\t%*s%s%s\n", indent, "", name,
1458                     is_log ? " [log]" : "");
1459
1460         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1461             &child, &children) != 0)
1462                 return;
1463
1464         for (c = 0; c < children; c++) {
1465                 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1466                 print_vdev_tree(hdl, vname, child[c], indent + 2);
1467                 free(vname);
1468         }
1469 }
1470
1471 /*
1472  * Import the given pool using the known configuration and a list of
1473  * properties to be set. The configuration should have come from
1474  * zpool_find_import(). The 'newname' parameter controls whether the pool
1475  * is imported with a different name.
1476  */
1477 int
1478 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1479     nvlist_t *props, int flags)
1480 {
1481         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
1482         zpool_rewind_policy_t policy;
1483         nvlist_t *nv = NULL;
1484         nvlist_t *nvinfo = NULL;
1485         nvlist_t *missing = NULL;
1486         char *thename;
1487         char *origname;
1488         int ret;
1489         int error = 0;
1490         char errbuf[1024];
1491
1492         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1493             &origname) == 0);
1494
1495         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1496             "cannot import pool '%s'"), origname);
1497
1498         if (newname != NULL) {
1499                 if (!zpool_name_valid(hdl, B_FALSE, newname))
1500                         return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1501                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1502                             newname));
1503                 thename = (char *)newname;
1504         } else {
1505                 thename = origname;
1506         }
1507
1508         if (props) {
1509                 uint64_t version;
1510                 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1511
1512                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1513                     &version) == 0);
1514
1515                 if ((props = zpool_valid_proplist(hdl, origname,
1516                     props, version, flags, errbuf)) == NULL) {
1517                         return (-1);
1518                 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1519                         nvlist_free(props);
1520                         return (-1);
1521                 }
1522         }
1523
1524         (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1525
1526         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1527             &zc.zc_guid) == 0);
1528
1529         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1530                 nvlist_free(props);
1531                 return (-1);
1532         }
1533         if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1534                 nvlist_free(props);
1535                 return (-1);
1536         }
1537
1538         zc.zc_cookie = flags;
1539         while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1540             errno == ENOMEM) {
1541                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1542                         zcmd_free_nvlists(&zc);
1543                         return (-1);
1544                 }
1545         }
1546         if (ret != 0)
1547                 error = errno;
1548
1549         (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1550         zpool_get_rewind_policy(config, &policy);
1551
1552         if (error) {
1553                 char desc[1024];
1554
1555                 /*
1556                  * The dry run failed, but we print out what success
1557                  * would look like if we found the best txg.
1558                  */
1559                 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1560                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1561                             B_TRUE, nv);
1562                         nvlist_free(nv);
1563                         return (-1);
1564                 }
1565
1566                 if (newname == NULL)
1567                         (void) snprintf(desc, sizeof (desc),
1568                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1569                             thename);
1570                 else
1571                         (void) snprintf(desc, sizeof (desc),
1572                             dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1573                             origname, thename);
1574
1575                 switch (error) {
1576                 case ENOTSUP:
1577                         /*
1578                          * Unsupported version.
1579                          */
1580                         (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1581                         break;
1582
1583                 case EINVAL:
1584                         (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1585                         break;
1586
1587                 case EROFS:
1588                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1589                             "one or more devices are read-only"));
1590                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
1591                         break;
1592
1593                 case ENXIO:
1594                         if (nv && nvlist_lookup_nvlist(nv,
1595                             ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1596                             nvlist_lookup_nvlist(nvinfo,
1597                             ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1598                                 (void) printf(dgettext(TEXT_DOMAIN,
1599                                     "The devices below are missing; use "
1600                                     "'-m' to import the pool anyway:\n"));
1601                                 print_vdev_tree(hdl, NULL, missing, 2);
1602                                 (void) printf("\n");
1603                         }
1604                         (void) zpool_standard_error(hdl, error, desc);
1605                         break;
1606
1607                 case EEXIST:
1608                         (void) zpool_standard_error(hdl, error, desc);
1609                         break;
1610
1611                 case EBUSY:
1612                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1613                             "one or more devices are already in use\n"));
1614                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
1615                         break;
1616
1617                 default:
1618                         (void) zpool_standard_error(hdl, error, desc);
1619                         zpool_explain_recover(hdl,
1620                             newname ? origname : thename, -error, nv);
1621                         break;
1622                 }
1623
1624                 nvlist_free(nv);
1625                 ret = -1;
1626         } else {
1627                 zpool_handle_t *zhp;
1628
1629                 /*
1630                  * This should never fail, but play it safe anyway.
1631                  */
1632                 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1633                         ret = -1;
1634                 else if (zhp != NULL)
1635                         zpool_close(zhp);
1636                 if (policy.zrp_request &
1637                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1638                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1639                             ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1640                 }
1641                 nvlist_free(nv);
1642                 return (0);
1643         }
1644
1645         zcmd_free_nvlists(&zc);
1646         nvlist_free(props);
1647
1648         return (ret);
1649 }
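
/*
 * Illustrative sketch: importing with an explicit property list through
 * zpool_import_props(), here setting cachefile=none just as
 * zpool_import() does above.  The rest is an assumption of the example.
 */
static int
example_import_no_cachefile(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *props = NULL;
        int ret;

        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0 ||
            nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
                nvlist_free(props);
                return (no_memory(hdl));
        }
        ret = zpool_import_props(hdl, config, NULL, props,
            ZFS_IMPORT_NORMAL);
        nvlist_free(props);
        return (ret);
}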
1650
1651 /*
1652  * Scan the pool.
1653  */
1654 int
1655 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1656 {
1657         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
1658         char msg[1024];
1659         libzfs_handle_t *hdl = zhp->zpool_hdl;
1660
1661         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1662         zc.zc_cookie = func;
1663
1664         if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1665             (errno == ENOENT && func != POOL_SCAN_NONE))
1666                 return (0);
1667
1668         if (func == POOL_SCAN_SCRUB) {
1669                 (void) snprintf(msg, sizeof (msg),
1670                     dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1671         } else if (func == POOL_SCAN_NONE) {
1672                 (void) snprintf(msg, sizeof (msg),
1673                     dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1674                     zc.zc_name);
1675         } else {
1676                 assert(!"unexpected result");
1677         }
1678
1679         if (errno == EBUSY) {
1680                 nvlist_t *nvroot;
1681                 pool_scan_stat_t *ps = NULL;
1682                 uint_t psc;
1683
1684                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1685                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1686                 (void) nvlist_lookup_uint64_array(nvroot,
1687                     ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1688                 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1689                         return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1690                 else
1691                         return (zfs_error(hdl, EZFS_RESILVERING, msg));
1692         } else if (errno == ENOENT) {
1693                 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1694         } else {
1695                 return (zpool_standard_error(hdl, errno, msg));
1696         }
1697 }
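
/*
 * Illustrative sketch: starting a scrub, or cancelling one in progress,
 * via zpool_scan().  Failures (e.g. a resilver already running) are
 * reported through the handle as shown above.
 */
static int
example_scrub(zpool_handle_t *zhp, boolean_t cancel)
{
        return (zpool_scan(zhp, cancel ? POOL_SCAN_NONE : POOL_SCAN_SCRUB));
}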
1698
1699 /*
1700  * Find a vdev that matches the specified search criteria. We use the
1701  * nvpair name to determine how we should look for the device.
1702  * 'avail_spare' is set to B_TRUE if the provided guid refers to an AVAIL
1703  * spare, and B_FALSE if it is an INUSE spare.
1704  */
1705 static nvlist_t *
1706 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1707     boolean_t *l2cache, boolean_t *log)
1708 {
1709         uint_t c, children;
1710         nvlist_t **child;
1711         nvlist_t *ret;
1712         uint64_t is_log;
1713         char *srchkey;
1714         nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1715
1716         /* Nothing to look for */
1717         if (search == NULL || pair == NULL)
1718                 return (NULL);
1719
1720         /* Obtain the key we will use to search */
1721         srchkey = nvpair_name(pair);
1722
1723         switch (nvpair_type(pair)) {
1724         case DATA_TYPE_UINT64:
1725                 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1726                         uint64_t srchval, theguid;
1727
1728                         verify(nvpair_value_uint64(pair, &srchval) == 0);
1729                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1730                             &theguid) == 0);
1731                         if (theguid == srchval)
1732                                 return (nv);
1733                 }
1734                 break;
1735
1736         case DATA_TYPE_STRING: {
1737                 char *srchval, *val;
1738
1739                 verify(nvpair_value_string(pair, &srchval) == 0);
1740                 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1741                         break;
1742
1743                 /*
1744                  * Search for the requested value. Special cases:
1745                  *
1746                  * - ZPOOL_CONFIG_PATH for whole-disk entries.  These end with a
1747                  *   partition suffix: "1", "-part1", or "p1".  The suffix is hidden
1748                  *   from the user but included in the string, so the search must
1749                  *   match around it.
1750                  * - ZPOOL_CONFIG_TYPE when looking for a top-level vdev name (e.g. "mirror-4").
1751                  *
1752                  * Otherwise, all other searches are simple string compares.
1753                  */
1754                 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
1755                         uint64_t wholedisk = 0;
1756
1757                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1758                             &wholedisk);
1759                         if (wholedisk) {
1760                                 char buf[MAXPATHLEN];
1761
1762                                 zfs_append_partition(srchval, buf, sizeof (buf));
1763                                 if (strcmp(val, buf) == 0)
1764                                         return (nv);
1765
1766                                 break;
1767                         }
1768                 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1769                         char *type, *idx, *end, *p;
1770                         uint64_t id, vdev_id;
1771
1772                         /*
1773                          * Determine our vdev type, keeping in mind
1774                          * that the srchval is composed of a type and
1775                          * vdev id pair (i.e. mirror-4).
1776                          */
1777                         if ((type = strdup(srchval)) == NULL)
1778                                 return (NULL);
1779
1780                         if ((p = strrchr(type, '-')) == NULL) {
1781                                 free(type);
1782                                 break;
1783                         }
1784                         idx = p + 1;
1785                         *p = '\0';
1786
1787                         /*
1788                          * If the types don't match then keep looking.
1789                          */
1790                         if (strncmp(val, type, strlen(val)) != 0) {
1791                                 free(type);
1792                                 break;
1793                         }
1794
1795                         verify(strncmp(type, VDEV_TYPE_RAIDZ,
1796                             strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1797                             strncmp(type, VDEV_TYPE_MIRROR,
1798                             strlen(VDEV_TYPE_MIRROR)) == 0);
1799                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1800                             &id) == 0);
1801
1802                         errno = 0;
1803                         vdev_id = strtoull(idx, &end, 10);
1804
1805                         free(type);
1806                         if (errno != 0)
1807                                 return (NULL);
1808
1809                         /*
1810                          * Now verify that we have the correct vdev id.
1811                          */
1812                         if (vdev_id == id)
1813                                 return (nv);
1814                 }
1815
1816                 /*
1817                  * Common case
1818                  */
1819                 if (strcmp(srchval, val) == 0)
1820                         return (nv);
1821                 break;
1822         }
1823
1824         default:
1825                 break;
1826         }
1827
1828         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1829             &child, &children) != 0)
1830                 return (NULL);
1831
1832         for (c = 0; c < children; c++) {
1833                 if ((ret = vdev_to_nvlist_iter(child[c], search,
1834                     avail_spare, l2cache, NULL)) != NULL) {
1835                         /*
1836                          * The 'is_log' value is only set for the toplevel
1837                          * vdev, not the leaf vdevs.  So we always lookup the
1838                          * log device from the root of the vdev tree (where
1839                          * 'log' is non-NULL).
1840                          */
1841                         if (log != NULL &&
1842                             nvlist_lookup_uint64(child[c],
1843                             ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1844                             is_log) {
1845                                 *log = B_TRUE;
1846                         }
1847                         return (ret);
1848                 }
1849         }
1850
1851         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1852             &child, &children) == 0) {
1853                 for (c = 0; c < children; c++) {
1854                         if ((ret = vdev_to_nvlist_iter(child[c], search,
1855                             avail_spare, l2cache, NULL)) != NULL) {
1856                                 *avail_spare = B_TRUE;
1857                                 return (ret);
1858                         }
1859                 }
1860         }
1861
1862         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1863             &child, &children) == 0) {
1864                 for (c = 0; c < children; c++) {
1865                         if ((ret = vdev_to_nvlist_iter(child[c], search,
1866                             avail_spare, l2cache, NULL)) != NULL) {
1867                                 *l2cache = B_TRUE;
1868                                 return (ret);
1869                         }
1870                 }
1871         }
1872
1873         return (NULL);
1874 }
1875
1876 /*
1877  * Given a physical path (minus the "/devices" prefix), find the
1878  * associated vdev.
1879  */
1880 nvlist_t *
1881 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1882     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1883 {
1884         nvlist_t *search, *nvroot, *ret;
1885
1886         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1887         verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1888
1889         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1890             &nvroot) == 0);
1891
1892         *avail_spare = B_FALSE;
1893         *l2cache = B_FALSE;
1894         if (log != NULL)
1895                 *log = B_FALSE;
1896         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1897         nvlist_free(search);
1898
1899         return (ret);
1900 }
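
/*
 * Illustrative sketch: classifying the vdev found at a given physical
 * path.  The path passed in is hypothetical; real callers obtain it
 * from the device tree.
 */
static void
example_classify_physpath(zpool_handle_t *zhp, const char *ppath)
{
        boolean_t spare, l2cache, log;

        if (zpool_find_vdev_by_physpath(zhp, ppath, &spare, &l2cache,
            &log) == NULL) {
                (void) printf("no vdev with physpath %s\n", ppath);
                return;
        }
        (void) printf("%s: spare=%d l2cache=%d log=%d\n", ppath,
            (int)spare, (int)l2cache, (int)log);
}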
1901
1902 /*
1903  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
1904  */
1905 boolean_t
1906 zpool_vdev_is_interior(const char *name)
1907 {
1908         if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1909             strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1910                 return (B_TRUE);
1911         return (B_FALSE);
1912 }
1913
1914 nvlist_t *
1915 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1916     boolean_t *l2cache, boolean_t *log)
1917 {
1918         char buf[MAXPATHLEN];
1919         char *end;
1920         nvlist_t *nvroot, *search, *ret;
1921         uint64_t guid;
1922
1923         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1924
1925         guid = strtoull(path, &end, 10);
1926         if (guid != 0 && *end == '\0') {
1927                 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1928         } else if (zpool_vdev_is_interior(path)) {
1929                 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1930         } else if (path[0] != '/') {
1931                 if (zfs_resolve_shortname(path, buf, sizeof (buf)) < 0) {
1932                         nvlist_free(search);
1933                         return (NULL);
1934                 }
1935                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1936         } else {
1937                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1938         }
1939
1940         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1941             &nvroot) == 0);
1942
1943         *avail_spare = B_FALSE;
1944         *l2cache = B_FALSE;
1945         if (log != NULL)
1946                 *log = B_FALSE;
1947         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1948         nvlist_free(search);
1949
1950         return (ret);
1951 }
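
/*
 * Illustrative sketch: resolving a user-supplied name (a guid string,
 * "mirror-N", a short device name, or a full path) to the matching
 * vdev's guid, returning 0 when nothing matches.
 */
static uint64_t
example_name_to_guid(zpool_handle_t *zhp, const char *name)
{
        boolean_t spare, l2cache;
        nvlist_t *tgt;
        uint64_t guid = 0;

        if ((tgt = zpool_find_vdev(zhp, name, &spare, &l2cache,
            NULL)) != NULL)
                verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
                    &guid) == 0);
        return (guid);
}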
1952
1953 static int
1954 vdev_online(nvlist_t *nv)
1955 {
1956         uint64_t ival;
1957
1958         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1959             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1960             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1961                 return (0);
1962
1963         return (1);
1964 }
1965
1966 /*
1967  * Helper function for zpool_get_physpaths().
1968  */
1969 static int
1970 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1971     size_t *bytes_written)
1972 {
1973         size_t bytes_left, pos, rsz;
1974         char *tmppath;
1975         const char *format;
1976
1977         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1978             &tmppath) != 0)
1979                 return (EZFS_NODEVICE);
1980
1981         pos = *bytes_written;
1982         bytes_left = physpath_size - pos;
1983         format = (pos == 0) ? "%s" : " %s";
1984
1985         rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1986         *bytes_written += rsz;
1987
1988         if (rsz >= bytes_left) {
1989                 /* if physpath was not copied properly, clear it */
1990                 if (bytes_left != 0) {
1991                         physpath[pos] = 0;
1992                 }
1993                 return (EZFS_NOSPC);
1994         }
1995         return (0);
1996 }
1997
1998 static int
1999 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2000     size_t *rsz, boolean_t is_spare)
2001 {
2002         char *type;
2003         int ret;
2004
2005         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2006                 return (EZFS_INVALCONFIG);
2007
2008         if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2009                 /*
2010                  * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2011                  * For a spare vdev, we only want to boot from the active
2012                  * spare device.
2013                  */
2014                 if (is_spare) {
2015                         uint64_t spare = 0;
2016                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2017                             &spare);
2018                         if (!spare)
2019                                 return (EZFS_INVALCONFIG);
2020                 }
2021
2022                 if (vdev_online(nv)) {
2023                         if ((ret = vdev_get_one_physpath(nv, physpath,
2024                             phypath_size, rsz)) != 0)
2025                                 return (ret);
2026                 }
2027         } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2028             strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2029             (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2030                 nvlist_t **child;
2031                 uint_t count;
2032                 int i, ret;
2033
2034                 if (nvlist_lookup_nvlist_array(nv,
2035                     ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2036                         return (EZFS_INVALCONFIG);
2037
2038                 for (i = 0; i < count; i++) {
2039                         ret = vdev_get_physpaths(child[i], physpath,
2040                             phypath_size, rsz, is_spare);
2041                         if (ret == EZFS_NOSPC)
2042                                 return (ret);
2043                 }
2044         }
2045
2046         return (EZFS_POOL_INVALARG);
2047 }
2048
2049 /*
2050  * Get phys_path for a root pool config.
2051  * Return 0 on success; non-zero on failure.
2052  */
2053 static int
2054 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2055 {
2056         size_t rsz;
2057         nvlist_t *vdev_root;
2058         nvlist_t **child;
2059         uint_t count;
2060         char *type;
2061
2062         rsz = 0;
2063
2064         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2065             &vdev_root) != 0)
2066                 return (EZFS_INVALCONFIG);
2067
2068         if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2069             nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2070             &child, &count) != 0)
2071                 return (EZFS_INVALCONFIG);
2072
2073         /*
2074          * A root pool cannot have EFI-labeled disks and can only have
2075          * a single top-level vdev.
2076          */
2077         if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2078             pool_uses_efi(vdev_root))
2079                 return (EZFS_POOL_INVALARG);
2080
2081         (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2082             B_FALSE);
2083
2084         /* No online devices */
2085         if (rsz == 0)
2086                 return (EZFS_NODEVICE);
2087
2088         return (0);
2089 }
2090
2091 /*
2092  * Get phys_path for a root pool
2093  * Return 0 on success; non-zero on failure.
2094  */
2095 int
2096 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2097 {
2098         return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2099             phypath_size));
2100 }
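
/*
 * Illustrative sketch: fetching the space-separated physical paths of a
 * bootable root pool's online disks.
 */
static void
example_print_physpaths(zpool_handle_t *zhp)
{
        char physpath[MAXPATHLEN];

        if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
                (void) printf("boot paths: %s\n", physpath);
}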
2101
2102 /*
2103  * If the device has been dynamically expanded, then we need to relabel
2104  * the disk to use the new unallocated space.
2105  */
2106 static int
2107 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2108 {
2109         int fd, error;
2110
2111         if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2112                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2113                     "relabel '%s': unable to open device: %d"), path, errno);
2114                 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2115         }
2116
2117         /*
2118          * It's possible that we might encounter an error if the device
2119          * does not have any unallocated space left. If so, we simply
2120          * ignore that error and continue on.
2121          *
2122          * Also, we don't call efi_rescan() - that would just return EBUSY.
2123          * The module will do it for us in vdev_disk_open().
2124          */
2125         error = efi_use_whole_disk(fd);
2126         (void) close(fd);
2127         if (error && error != VT_ENOSPC) {
2128                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2129                     "relabel '%s': unable to read disk capacity"), path);
2130                 return (zfs_error(hdl, EZFS_NOCAP, msg));
2131         }
2132         return (0);
2133 }
2134
2135 /*
2136  * Bring the specified vdev online.  The 'flags' parameter is a set of the
2137  * ZFS_ONLINE_* flags.
2138  */
2139 int
2140 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2141     vdev_state_t *newstate)
2142 {
2143         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2144         char msg[1024];
2145         nvlist_t *tgt;
2146         boolean_t avail_spare, l2cache, islog;
2147         libzfs_handle_t *hdl = zhp->zpool_hdl;
2148         int error;
2149
2150         if (flags & ZFS_ONLINE_EXPAND) {
2151                 (void) snprintf(msg, sizeof (msg),
2152                     dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2153         } else {
2154                 (void) snprintf(msg, sizeof (msg),
2155                     dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2156         }
2157
2158         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2159         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2160             &islog)) == NULL)
2161                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2162
2163         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2164
2165         if (avail_spare)
2166                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2167
2168         if (flags & ZFS_ONLINE_EXPAND ||
2169             zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2170                 uint64_t wholedisk = 0;
2171
2172                 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2173                     &wholedisk);
2174
2175                 /*
2176                  * XXX - L2ARC 1.0 devices can't support expansion.
2177                  */
2178                 if (l2cache) {
2179                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2180                             "cannot expand cache devices"));
2181                         return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2182                 }
2183
2184                 if (wholedisk) {
2185                         const char *fullpath = path;
2186                         char buf[MAXPATHLEN];
2187
2188                         if (path[0] != '/') {
2189                                 error = zfs_resolve_shortname(path, buf,
2190                                     sizeof (buf));
2191                                 if (error != 0)
2192                                         return (zfs_error(hdl, EZFS_NODEVICE,
2193                                             msg));
2194
2195                                 fullpath = buf;
2196                         }
2197
2198                         error = zpool_relabel_disk(hdl, fullpath, msg);
2199                         if (error != 0)
2200                                 return (error);
2201                 }
2202         }
2203
2204         zc.zc_cookie = VDEV_STATE_ONLINE;
2205         zc.zc_obj = flags;
2206
2207         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2208                 if (errno == EINVAL) {
2209                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2210                             "from this pool into a new one.  Use '%s' "
2211                             "instead"), "zpool detach");
2212                         return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2213                 }
2214                 return (zpool_standard_error(hdl, errno, msg));
2215         }
2216
2217         *newstate = zc.zc_cookie;
2218         return (0);
2219 }
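
/*
 * Illustrative sketch: onlining a device and requesting expansion, as
 * "zpool online -e" would.  The device name is supplied by the caller.
 */
static int
example_online_expand(zpool_handle_t *zhp, const char *dev)
{
        vdev_state_t newstate;
        int error;

        error = zpool_vdev_online(zhp, dev, ZFS_ONLINE_EXPAND, &newstate);
        if (error == 0 && newstate != VDEV_STATE_HEALTHY)
                (void) printf("%s onlined, but is not healthy\n", dev);
        return (error);
}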
2220
2221 /*
2222  * Take the specified vdev offline.
2223  */
2224 int
2225 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2226 {
2227         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2228         char msg[1024];
2229         nvlist_t *tgt;
2230         boolean_t avail_spare, l2cache;
2231         libzfs_handle_t *hdl = zhp->zpool_hdl;
2232
2233         (void) snprintf(msg, sizeof (msg),
2234             dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2235
2236         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2237         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2238             NULL)) == NULL)
2239                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2240
2241         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2242
2243         if (avail_spare)
2244                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2245
2246         zc.zc_cookie = VDEV_STATE_OFFLINE;
2247         zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2248
2249         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2250                 return (0);
2251
2252         switch (errno) {
2253         case EBUSY:
2254
2255                 /*
2256                  * There are no other replicas of this device.
2257                  */
2258                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2259
2260         case EEXIST:
2261                 /*
2262                  * The log device has unplayed logs.
2263                  */
2264                 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2265
2266         default:
2267                 return (zpool_standard_error(hdl, errno, msg));
2268         }
2269 }
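
/*
 * Illustrative sketch: taking a device offline only until the next
 * reboot, as "zpool offline -t" would.
 */
static int
example_offline_temporary(zpool_handle_t *zhp, const char *dev)
{
        return (zpool_vdev_offline(zhp, dev, B_TRUE));
}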
2270
2271 /*
2272  * Mark the given vdev faulted.
2273  */
2274 int
2275 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2276 {
2277         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2278         char msg[1024];
2279         libzfs_handle_t *hdl = zhp->zpool_hdl;
2280
2281         (void) snprintf(msg, sizeof (msg),
2282            dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2283
2284         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2285         zc.zc_guid = guid;
2286         zc.zc_cookie = VDEV_STATE_FAULTED;
2287         zc.zc_obj = aux;
2288
2289         if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2290                 return (0);
2291
2292         switch (errno) {
2293         case EBUSY:
2294
2295                 /*
2296                  * There are no other replicas of this device.
2297                  */
2298                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2299
2300         default:
2301                 return (zpool_standard_error(hdl, errno, msg));
2302         }
2303
2304 }
2305
2306 /*
2307  * Mark the given vdev degraded.
2308  */
2309 int
2310 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2311 {
2312         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2313         char msg[1024];
2314         libzfs_handle_t *hdl = zhp->zpool_hdl;
2315
2316         (void) snprintf(msg, sizeof (msg),
2317            dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2318
2319         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2320         zc.zc_guid = guid;
2321         zc.zc_cookie = VDEV_STATE_DEGRADED;
2322         zc.zc_obj = aux;
2323
2324         if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2325                 return (0);
2326
2327         return (zpool_standard_error(hdl, errno, msg));
2328 }
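
/*
 * Illustrative sketch: resolving a device name to its guid and marking
 * it degraded with no specific aux reason; zpool_vdev_fault() is driven
 * the same way.
 */
static int
example_degrade_by_name(zpool_handle_t *zhp, const char *dev)
{
        boolean_t spare, l2cache;
        nvlist_t *tgt;
        uint64_t guid;

        if ((tgt = zpool_find_vdev(zhp, dev, &spare, &l2cache,
            NULL)) == NULL)
                return (-1);
        verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
        return (zpool_vdev_degrade(zhp, guid, VDEV_AUX_NONE));
}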
2329
2330 /*
2331  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2332  * a hot spare.
2333  */
2334 static boolean_t
2335 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2336 {
2337         nvlist_t **child;
2338         uint_t c, children;
2339         char *type;
2340
2341         if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2342             &children) == 0) {
2343                 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2344                     &type) == 0);
2345
2346                 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2347                     children == 2 && child[which] == tgt)
2348                         return (B_TRUE);
2349
2350                 for (c = 0; c < children; c++)
2351                         if (is_replacing_spare(child[c], tgt, which))
2352                                 return (B_TRUE);
2353         }
2354
2355         return (B_FALSE);
2356 }
2357
2358 /*
2359  * Attach new_disk (fully described by nvroot) to old_disk.
2360  * If 'replacing' is specified, the new disk will replace the old one.
2361  */
2362 int
2363 zpool_vdev_attach(zpool_handle_t *zhp,
2364     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2365 {
2366         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2367         char msg[1024];
2368         int ret;
2369         nvlist_t *tgt;
2370         boolean_t avail_spare, l2cache, islog;
2371         uint64_t val;
2372         char *newname;
2373         nvlist_t **child;
2374         uint_t children;
2375         nvlist_t *config_root;
2376         libzfs_handle_t *hdl = zhp->zpool_hdl;
2377         boolean_t rootpool = pool_is_bootable(zhp);
2378
2379         if (replacing)
2380                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2381                     "cannot replace %s with %s"), old_disk, new_disk);
2382         else
2383                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2384                     "cannot attach %s to %s"), new_disk, old_disk);
2385
2386         /*
2387          * If this is a root pool, make sure that we're not attaching an
2388          * EFI labeled device.
2389          */
2390         if (rootpool && pool_uses_efi(nvroot)) {
2391                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2392                     "EFI labeled devices are not supported on root pools."));
2393                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2394         }
2395
2396         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2397         if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2398             &islog)) == 0)
2399                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2400
2401         if (avail_spare)
2402                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2403
2404         if (l2cache)
2405                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2406
2407         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2408         zc.zc_cookie = replacing;
2409
2410         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2411             &child, &children) != 0 || children != 1) {
2412                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2413                     "new device must be a single disk"));
2414                 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2415         }
2416
2417         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2418             ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2419
2420         if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2421                 return (-1);
2422
2423         /*
2424          * If the target is a hot spare that has been swapped in, we can only
2425          * replace it with another hot spare.
2426          */
2427         if (replacing &&
2428             nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2429             (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2430             NULL) == NULL || !avail_spare) &&
2431             is_replacing_spare(config_root, tgt, 1)) {
2432                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2433                     "can only be replaced by another hot spare"));
2434                 free(newname);
2435                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2436         }
2437
2438         free(newname);
2439
2440         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2441                 return (-1);
2442
2443         ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2444
2445         zcmd_free_nvlists(&zc);
2446
2447         if (ret == 0) {
2448                 if (rootpool) {
2449                         /*
2450                          * XXX need a better way to prevent the user from
2451                          * booting up a half-baked vdev.
2452                          */
2453                         (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2454                             "sure to wait until resilver is done "
2455                             "before rebooting.\n"));
2456                 }
2457                 return (0);
2458         }
2459
2460         switch (errno) {
2461         case ENOTSUP:
2462                 /*
2463                  * Can't attach to or replace this type of vdev.
2464                  */
2465                 if (replacing) {
2466                         uint64_t version = zpool_get_prop_int(zhp,
2467                             ZPOOL_PROP_VERSION, NULL);
2468
2469                         if (islog)
2470                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2471                                     "cannot replace a log with a spare"));
2472                         else if (version >= SPA_VERSION_MULTI_REPLACE)
2473                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2474                                     "already in replacing/spare config; wait "
2475                                     "for completion or use 'zpool detach'"));
2476                         else
2477                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2478                                     "cannot replace a replacing device"));
2479                 } else {
2480                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2481                             "can only attach to mirrors and top-level "
2482                             "disks"));
2483                 }
2484                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2485                 break;
2486
2487         case EINVAL:
2488                 /*
2489                  * The new device must be a single disk.
2490                  */
2491                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2492                     "new device must be a single disk"));
2493                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2494                 break;
2495
2496         case EBUSY:
2497                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2498                     new_disk);
2499                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2500                 break;
2501
2502         case EOVERFLOW:
2503                 /*
2504                  * The new device is too small.
2505                  */
2506                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2507                     "device is too small"));
2508                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2509                 break;
2510
2511         case EDOM:
2512                 /*
2513                  * The new device has a different alignment requirement.
2514                  */
2515                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2516                     "devices have different sector alignment"));
2517                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2518                 break;
2519
2520         case ENAMETOOLONG:
2521                 /*
2522                  * The resulting top-level vdev spec won't fit in the label.
2523                  */
2524                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2525                 break;
2526
2527         default:
2528                 (void) zpool_standard_error(hdl, errno, msg);
2529         }
2530
2531         return (-1);
2532 }
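
/*
 * Illustrative sketch: building the single-disk nvroot that
 * zpool_vdev_attach() expects and replacing 'old_disk' with 'new_disk'.
 * A real caller (the zpool command) constructs this via make_root_vdev(),
 * which also labels the disk and fills in other fields; those steps are
 * omitted here, so the kernel may reject an unprepared device.
 */
static int
example_replace_disk(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk)
{
        nvlist_t *nvroot, *disk;
        int ret = -1;

        if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                return (-1);
        if (nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) != 0) {
                nvlist_free(nvroot);
                return (-1);
        }
        if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0 &&
            nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) == 0 &&
            nvlist_add_string(disk, ZPOOL_CONFIG_PATH, new_disk) == 0 &&
            nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &disk, 1) == 0)
                ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot,
                    B_TRUE);
        nvlist_free(disk);
        nvlist_free(nvroot);
        return (ret);
}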
2533
2534 /*
2535  * Detach the specified device.
2536  */
2537 int
2538 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2539 {
2540         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2541         char msg[1024];
2542         nvlist_t *tgt;
2543         boolean_t avail_spare, l2cache;
2544         libzfs_handle_t *hdl = zhp->zpool_hdl;
2545
2546         (void) snprintf(msg, sizeof (msg),
2547             dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2548
2549         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2550         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2551             NULL)) == 0)
2552                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2553
2554         if (avail_spare)
2555                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2556
2557         if (l2cache)
2558                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2559
2560         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2561
2562         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2563                 return (0);
2564
2565         switch (errno) {
2566
2567         case ENOTSUP:
2568                 /*
2569                  * Can't detach from this type of vdev.
2570                  */
2571                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2572                     "applicable to mirror and replacing vdevs"));
2573                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2574                 break;
2575
2576         case EBUSY:
2577                 /*
2578                  * There are no other replicas of this device.
2579                  */
2580                 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2581                 break;
2582
2583         default:
2584                 (void) zpool_standard_error(hdl, errno, msg);
2585         }
2586
2587         return (-1);
2588 }
2589
2590 /*
2591  * Find a mirror vdev in the source nvlist.
2592  *
2593  * The mchild array contains a list of disks in one of the top-level mirrors
2594  * of the source pool.  The schild array contains a list of disks that the
2595  * user specified on the command line.  We loop over the mchild array to
2596  * see if any entry in the schild array matches.
2597  *
2598  * If a disk in the mchild array is found in the schild array, we return
2599  * the index of that entry.  Otherwise we return -1.
2600  */
2601 static int
2602 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2603     nvlist_t **schild, uint_t schildren)
2604 {
2605         uint_t mc;
2606
2607         for (mc = 0; mc < mchildren; mc++) {
2608                 uint_t sc;
2609                 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2610                     mchild[mc], B_FALSE);
2611
2612                 for (sc = 0; sc < schildren; sc++) {
2613                         char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2614                             schild[sc], B_FALSE);
2615                         boolean_t result = (strcmp(mpath, spath) == 0);
2616
2617                         free(spath);
2618                         if (result) {
2619                                 free(mpath);
2620                                 return (mc);
2621                         }
2622                 }
2623
2624                 free(mpath);
2625         }
2626
2627         return (-1);
2628 }
2629
2630 /*
2631  * Split a mirror pool.  If '*newroot' is NULL, then a new nvlist
2632  * is generated and it is the responsibility of the caller to free it.
2633  */
2634 int
2635 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2636     nvlist_t *props, splitflags_t flags)
2637 {
2638         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2639         char msg[1024];
2640         nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2641         nvlist_t **varray = NULL, *zc_props = NULL;
2642         uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2643         libzfs_handle_t *hdl = zhp->zpool_hdl;
2644         uint64_t vers;
2645         boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2646         int retval = 0;
2647
2648         (void) snprintf(msg, sizeof (msg),
2649             dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2650
2651         if (!zpool_name_valid(hdl, B_FALSE, newname))
2652                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2653
2654         if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2655                 (void) fprintf(stderr, gettext("Internal error: unable to "
2656                     "retrieve pool configuration\n"));
2657                 return (-1);
2658         }
2659
2660         verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2661             == 0);
2662         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2663
2664         if (props) {
2665                 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2666                 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2667                     props, vers, flags, msg)) == NULL)
2668                         return (-1);
2669         }
2670
2671         if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2672             &children) != 0) {
2673                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2674                     "Source pool is missing vdev tree"));
2675                 if (zc_props)
2676                         nvlist_free(zc_props);
2677                 return (-1);
2678         }
2679
2680         varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2681         vcount = 0;
2682
2683         if (*newroot == NULL ||
2684             nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2685             &newchild, &newchildren) != 0)
2686                 newchildren = 0;
2687
2688         for (c = 0; c < children; c++) {
2689                 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2690                 char *type;
2691                 nvlist_t **mchild, *vdev;
2692                 uint_t mchildren;
2693                 int entry;
2694
2695                 /*
2696                  * Unlike cache & spares, slogs are stored in the
2697                  * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2698                  */
2699                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2700                     &is_log);
2701                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2702                     &is_hole);
2703                 if (is_log || is_hole) {
2704                         /*
2705                          * Create a hole vdev and put it in the config.
2706                          */
2707                         if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2708                                 goto out;
2709                         if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2710                             VDEV_TYPE_HOLE) != 0)
2711                                 goto out;
2712                         if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2713                             1) != 0)
2714                                 goto out;
2715                         if (lastlog == 0)
2716                                 lastlog = vcount;
2717                         varray[vcount++] = vdev;
2718                         continue;
2719                 }
2720                 lastlog = 0;
2721                 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2722                     == 0);
2723                 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2724                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2725                             "Source pool must be composed only of mirrors\n"));
2726                         retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2727                         goto out;
2728                 }
2729
2730                 verify(nvlist_lookup_nvlist_array(child[c],
2731                     ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2732
2733                 /* find or add an entry for this top-level vdev */
2734                 if (newchildren > 0 &&
2735                     (entry = find_vdev_entry(zhp, mchild, mchildren,
2736                     newchild, newchildren)) >= 0) {
2737                         /* We found a disk that the user specified. */
2738                         vdev = mchild[entry];
2739                         ++found;
2740                 } else {
2741                         /* User didn't specify a disk for this vdev. */
2742                         vdev = mchild[mchildren - 1];
2743                 }
2744
2745                 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2746                         goto out;
2747         }
2748
2749         /* did we find every disk the user specified? */
2750         if (found != newchildren) {
2751                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2752                     "include at most one disk from each mirror"));
2753                 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2754                 goto out;
2755         }
2756
2757         /* Prepare the nvlist for populating. */
2758         if (*newroot == NULL) {
2759                 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2760                         goto out;
2761                 freelist = B_TRUE;
2762                 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2763                     VDEV_TYPE_ROOT) != 0)
2764                         goto out;
2765         } else {
2766                 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2767         }
2768
2769         /* Add all the children we found */
2770         if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2771             lastlog == 0 ? vcount : lastlog) != 0)
2772                 goto out;
2773
2774         /*
2775          * If we're just doing a dry run, exit now with success.
2776          */
2777         if (flags.dryrun) {
2778                 memory_err = B_FALSE;
2779                 freelist = B_FALSE;
2780                 goto out;
2781         }
2782
2783         /* now build up the config list & call the ioctl */
2784         if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2785                 goto out;
2786
2787         if (nvlist_add_nvlist(newconfig,
2788             ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2789             nvlist_add_string(newconfig,
2790             ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2791             nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2792                 goto out;
2793
2794         /*
2795          * The new pool is automatically part of the namespace unless we
2796          * explicitly export it.
2797          */
2798         if (!flags.import)
2799                 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2800         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2801         (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2802         if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2803                 goto out;
2804         if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2805                 goto out;
2806
2807         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2808                 retval = zpool_standard_error(hdl, errno, msg);
2809                 goto out;
2810         }
2811
2812         freelist = B_FALSE;
2813         memory_err = B_FALSE;
2814
2815 out:
2816         if (varray != NULL) {
2817                 int v;
2818
2819                 for (v = 0; v < vcount; v++)
2820                         nvlist_free(varray[v]);
2821                 free(varray);
2822         }
2823         zcmd_free_nvlists(&zc);
2824         if (zc_props)
2825                 nvlist_free(zc_props);
2826         if (newconfig)
2827                 nvlist_free(newconfig);
2828         if (freelist) {
2829                 nvlist_free(*newroot);
2830                 *newroot = NULL;
2831         }
2832
2833         if (retval != 0)
2834                 return (retval);
2835
2836         if (memory_err)
2837                 return (no_memory(hdl));
2838
2839         return (0);
2840 }
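
/*
 * Illustrative sketch: a dry-run split that lets zpool_vdev_split()
 * pick the last disk of each mirror and prints the layout the new pool
 * would have.  The pool name "newpool" is hypothetical.
 */
static int
example_split_dryrun(zpool_handle_t *zhp)
{
        nvlist_t *newroot = NULL;
        splitflags_t flags = { 0 };
        char newname[] = "newpool";
        int ret;

        flags.dryrun = 1;
        ret = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
        if (ret == 0 && newroot != NULL) {
                print_vdev_tree(zhp->zpool_hdl, newname, newroot, 0);
                nvlist_free(newroot);
        }
        return (ret);
}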
2841
2842 /*
2843  * Remove the given device.  Currently, this is supported only for hot spares,
2844  * level 2 cache devices, and log devices.
2845  */
2846 int
2847 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2848 {
2849         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2850         char msg[1024];
2851         nvlist_t *tgt;
2852         boolean_t avail_spare, l2cache, islog;
2853         libzfs_handle_t *hdl = zhp->zpool_hdl;
2854         uint64_t version;
2855
2856         (void) snprintf(msg, sizeof (msg),
2857             dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2858
2859         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2860         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2861             &islog)) == 0)
2862                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2863         /*
2864          * XXX - this should just go away.
2865          */
2866         if (!avail_spare && !l2cache && !islog) {
2867                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2868                     "only inactive hot spares, cache, "
2869                     "or log devices can be removed"));
2870                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2871         }
2872
2873         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2874         if (islog && version < SPA_VERSION_HOLES) {
2875                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2876                     "pool must be upgraded to support log removal"));
2877                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
2878         }
2879
2880         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2881
2882         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2883                 return (0);
2884
2885         return (zpool_standard_error(hdl, errno, msg));
2886 }
2887
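/*
 * A minimal usage sketch for zpool_vdev_remove(); the pool name and
 * device path are assumptions for illustration only.
 *
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *	if (zhp != NULL) {
 *		if (zpool_vdev_remove(zhp, "/dev/sdc") != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(zhp);
 *	}
 */
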
2888 /*
2889  * Clear the errors for the pool, or the particular device if specified.
2890  */
2891 int
2892 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2893 {
2894         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2895         char msg[1024];
2896         nvlist_t *tgt;
2897         zpool_rewind_policy_t policy;
2898         boolean_t avail_spare, l2cache;
2899         libzfs_handle_t *hdl = zhp->zpool_hdl;
2900         nvlist_t *nvi = NULL;
2901         int error;
2902
2903         (void) snprintf(msg, sizeof (msg),
2904             dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2905             path != NULL ? path : zhp->zpool_name);
2911
2912         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2913         if (path) {
2914                 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2915                     &l2cache, NULL)) == NULL)
2916                         return (zfs_error(hdl, EZFS_NODEVICE, msg));
2917
2918                 /*
2919                  * Don't allow error clearing for hot spares.  Do allow
2920                  * error clearing for l2cache devices.
2921                  */
2922                 if (avail_spare)
2923                         return (zfs_error(hdl, EZFS_ISSPARE, msg));
2924
2925                 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2926                     &zc.zc_guid) == 0);
2927         }
2928
2929         zpool_get_rewind_policy(rewindnvl, &policy);
2930         zc.zc_cookie = policy.zrp_request;
2931
2932         if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
2933                 return (-1);
2934
2935         if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) {
2936                 zcmd_free_nvlists(&zc);
2937                 return (-1);
2938         }
2937
2938         while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
2939             errno == ENOMEM) {
2940                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2941                         zcmd_free_nvlists(&zc);
2942                         return (-1);
2943                 }
2944         }
2945
2946         if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2947             errno != EPERM && errno != EACCES)) {
2948                 if (policy.zrp_request &
2949                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2950                         (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2951                         zpool_rewind_exclaim(hdl, zc.zc_name,
2952                             ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2953                             nvi);
2954                         nvlist_free(nvi);
2955                 }
2956                 zcmd_free_nvlists(&zc);
2957                 return (0);
2958         }
2959
2960         zcmd_free_nvlists(&zc);
2961         return (zpool_standard_error(hdl, errno, msg));
2962 }
2963
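/*
 * A minimal usage sketch for zpool_clear().  A NULL path clears the
 * whole pool; an empty policy nvlist makes zpool_get_rewind_policy()
 * fall back to its default, ZPOOL_NO_REWIND.
 *
 *	nvlist_t *policy = NULL;
 *
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	if (zpool_clear(zhp, NULL, policy) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 *	nvlist_free(policy);
 */
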
2964 /*
2965  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2966  */
2967 int
2968 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2969 {
2970         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2971         char msg[1024];
2972         libzfs_handle_t *hdl = zhp->zpool_hdl;
2973
2974         (void) snprintf(msg, sizeof (msg),
2975             dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2976             (u_longlong_t)guid);
2977
2978         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2979         zc.zc_guid = guid;
2980         zc.zc_cookie = ZPOOL_NO_REWIND;
2981
2982         if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2983                 return (0);
2984
2985         return (zpool_standard_error(hdl, errno, msg));
2986 }
2987
2988 /*
2989  * Change the GUID for a pool.
2990  */
2991 int
2992 zpool_reguid(zpool_handle_t *zhp)
2993 {
2994         char msg[1024];
2995         libzfs_handle_t *hdl = zhp->zpool_hdl;
2996         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2997
2998         (void) snprintf(msg, sizeof (msg),
2999             dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3000
3001         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3002         if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3003                 return (0);
3004
3005         return (zpool_standard_error(hdl, errno, msg));
3006 }
3007
3008 /*
3009  * Convert from a devid string to a path.
3010  */
3011 static char *
3012 devid_to_path(char *devid_str)
3013 {
3014         ddi_devid_t devid;
3015         char *minor;
3016         char *path;
3017         devid_nmlist_t *list = NULL;
3018         int ret;
3019
3020         if (devid_str_decode(devid_str, &devid, &minor) != 0)
3021                 return (NULL);
3022
3023         ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3024
3025         devid_str_free(minor);
3026         devid_free(devid);
3027
3028         if (ret != 0)
3029                 return (NULL);
3030
3031         path = strdup(list[0].devname);
3032
3033         /* free the list only after duplicating the name */
3034         devid_free_nmlist(list);
3035
3036         return (path);
3037 }
3038
3039 /*
3040  * Convert from a path to a devid string.
3041  */
3042 static char *
3043 path_to_devid(const char *path)
3044 {
3045         int fd;
3046         ddi_devid_t devid;
3047         char *minor, *ret;
3048
3049         if ((fd = open(path, O_RDONLY)) < 0)
3050                 return (NULL);
3051
3052         minor = NULL;
3053         ret = NULL;
3054         if (devid_get(fd, &devid) == 0) {
3055                 if (devid_get_minor_name(fd, &minor) == 0)
3056                         ret = devid_str_encode(devid, minor);
3057                 if (minor != NULL)
3058                         devid_str_free(minor);
3059                 devid_free(devid);
3060         }
3061         (void) close(fd);
3062
3063         return (ret);
3064 }
3065
3066 /*
3067  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
3068  * ignore any failure here, since a common case is for an unprivileged user to
3069  * type 'zpool status', and we'll display the correct information anyway.
3070  */
3071 static void
3072 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3073 {
3074         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3075
3076         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3077         (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
3078         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3079             &zc.zc_guid) == 0);
3080
3081         (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3082 }
3083
3084 /*
3085  * Remove partition suffix from a vdev path.  Partition suffixes may take three
3086  * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
3087  * case only occurs when the suffix is preceded by a digit, e.g. "md0p0".  The
3088  * third case only occurs when preceded by a string matching the regular
3089  * expression "^[hs]d[a-z]+", i.e. a SCSI or IDE disk.
3090  */
3091 static char *
3092 strip_partition(libzfs_handle_t *hdl, char *path)
3093 {
3094         char *tmp = zfs_strdup(hdl, path);
3095         char *part = NULL, *d = NULL;
3096
3097         if ((part = strstr(tmp, "-part")) && part != tmp) {
3098                 d = part + 5;
3099         } else if ((part = strrchr(tmp, 'p')) &&
3100             part > tmp + 1 && isdigit(*(part-1))) {
3101                 d = part + 1;
3102         } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3103                 for (d = &tmp[2]; isalpha(*d); part = ++d);
3104         }
3105         if (part && d && *d != '\0') {
3106                 for (; isdigit(*d); d++);
3107                 if (*d == '\0')
3108                         *part = '\0';
3109         }
3110         return (tmp);
3111 }
3112
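/*
 * Example transformations performed by strip_partition(); the device
 * names are hypothetical:
 *
 *	"sda1"                       -> "sda"
 *	"md0p0"                      -> "md0"
 *	"/dev/disk/by-id/foo-part1"  -> "/dev/disk/by-id/foo"
 *	"sda"                        -> "sda"    (no suffix, unchanged)
 */
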
3113 #define PATH_BUF_LEN    64
3114
3115 /*
3116  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
3117  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3118  * We also check if this is a whole disk, in which case we strip off the
3119  * trailing 's0' slice name.
3120  *
3121  * This routine is also responsible for identifying when disks have been
3122  * reconfigured in a new location.  The kernel will have opened the device by
3123  * devid, but the path will still refer to the old location.  To catch this, we
3124  * first do a path -> devid translation (which is fast for the common case).  If
3125  * the devid matches, we're done.  If not, we do a reverse devid -> path
3126  * translation and issue the appropriate ioctl() to update the path of the vdev.
3127  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3128  * of these checks.
3129  */
3130 char *
3131 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3132     boolean_t verbose)
3133 {
3134         char *path, *devid, *type;
3135         uint64_t value;
3136         char buf[PATH_BUF_LEN];
3137         vdev_stat_t *vs;
3138         uint_t vsc;
3139
3140         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3141             &value) == 0) {
3142                 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3143                     &value) == 0);
3144                 (void) snprintf(buf, sizeof (buf), "%llu",
3145                     (u_longlong_t)value);
3146                 path = buf;
3147         } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3148                 /*
3149                  * If the device is dead (faulted, offline, etc) then don't
3150                  * bother opening it.  Otherwise we may be forcing the user to
3151                  * open a misbehaving device, which can have undesirable
3152                  * effects.
3153                  */
3154                 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3155                     (uint64_t **)&vs, &vsc) != 0 ||
3156                     vs->vs_state >= VDEV_STATE_DEGRADED) &&
3157                     zhp != NULL &&
3158                     nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3159                         /*
3160                          * Determine if the current path is correct.
3161                          */
3162                         char *newdevid = path_to_devid(path);
3163
3164                         if (newdevid == NULL ||
3165                             strcmp(devid, newdevid) != 0) {
3166                                 char *newpath;
3167
3168                                 if ((newpath = devid_to_path(devid)) != NULL) {
3169                                         /*
3170                                          * Update the path appropriately.
3171                                          */
3172                                         set_path(zhp, nv, newpath);
3173                                         if (nvlist_add_string(nv,
3174                                             ZPOOL_CONFIG_PATH, newpath) == 0)
3175                                                 verify(nvlist_lookup_string(nv,
3176                                                     ZPOOL_CONFIG_PATH,
3177                                                     &path) == 0);
3178                                         free(newpath);
3179                                 }
3180                         }
3181
3182                         if (newdevid)
3183                                 devid_str_free(newdevid);
3184                 }
3185
3186                 /*
3187                  * For a block device only use the name.
3188                  */
3189                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3190                 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3191                         path = strrchr(path, '/');
3192                         path++;
3193                 }
3194
3195                 /*
3196          * Remove the partition from the path if this is a whole disk.
3197                  */
3198                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3199                     &value) == 0 && value) {
3200                         return (strip_partition(hdl, path));
3201                 }
3202         } else {
3203                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3204
3205                 /*
3206                  * If it's a raidz device, we need to stick in the parity level.
3207                  */
3208                 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3209                         char tmpbuf[PATH_BUF_LEN];
3210
3211                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3212                             &value) == 0);
3213                         (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%llu", path,
3214                             (u_longlong_t)value);
3215                         path = tmpbuf;
3216                 }
3217
3218                 /*
3219                  * We identify each top-level vdev by using a <type-id>
3220                  * naming convention.
3221                  */
3222                 if (verbose) {
3223                         uint64_t id;
3224
3225                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3226                             &id) == 0);
3227                         (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3228                             (u_longlong_t)id);
3229                         path = buf;
3230                 }
3231         }
3232
3233         return (zfs_strdup(hdl, path));
3234 }
3235
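/*
 * A usage sketch for zpool_vdev_name(): printing the display name of a
 * pool's root vdev.  'hdl' and 'zhp' are assumed to come from
 * libzfs_init() and zpool_open() respectively.
 *
 *	nvlist_t *config = zpool_get_config(zhp, NULL);
 *	nvlist_t *nvroot;
 *	char *name;
 *
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	name = zpool_vdev_name(hdl, zhp, nvroot, B_FALSE);
 *	(void) printf("%s\n", name);
 *	free(name);
 */
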
3236 static int
3237 zbookmark_compare(const void *a, const void *b)
3238 {
3239         return (memcmp(a, b, sizeof (zbookmark_t)));
3240 }
3241
3242 /*
3243  * Retrieve the persistent error log, uniquify the members, and return to the
3244  * caller.
3245  */
3246 int
3247 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3248 {
3249         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3250         uint64_t count;
3251         zbookmark_t *zb = NULL;
3252         int i;
3253
3254         /*
3255          * Retrieve the raw error list from the kernel.  If the number of errors
3256          * has increased, allocate more space and continue until we get the
3257          * entire list.
3258          */
3259         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3260             &count) == 0);
3261         if (count == 0)
3262                 return (0);
3263         if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3264             count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3265                 return (-1);
3266         zc.zc_nvlist_dst_size = count;
3267         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3268         for (;;) {
3269                 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3270                     &zc) != 0) {
3271                         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3272                         if (errno == ENOMEM) {
3273                                 count = zc.zc_nvlist_dst_size;
3274                                 if ((zc.zc_nvlist_dst = (uintptr_t)
3275                                     zfs_alloc(zhp->zpool_hdl, count *
3276                                     sizeof (zbookmark_t))) == (uintptr_t)NULL)
3277                                         return (-1);
3278                         } else {
3279                                 return (-1);
3280                         }
3281                 } else {
3282                         break;
3283                 }
3284         }
3285
3286         /*
3287          * Sort the resulting bookmarks.  This is a little confusing due to the
3288          * implementation of ZFS_IOC_ERROR_LOG: the bookmarks are copied last to
3289          * first, and 'zc_nvlist_dst_size' is the number of bookmarks _not_
3290          * copied.  So we point our array past the uncopied slots (e.g. 4 of 10
3291          * uncopied: the 6 valid entries fill the tail) and shrink the count.
3292          */
3293         zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3294             zc.zc_nvlist_dst_size;
3295         count -= zc.zc_nvlist_dst_size;
3296
3297         qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3298
3299         verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3300
3301         /*
3302          * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3303          */
3304         for (i = 0; i < count; i++) {
3305                 nvlist_t *nv;
3306
3307                 /* ignoring zb_blkid and zb_level for now */
3308                 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3309                     zb[i-1].zb_object == zb[i].zb_object)
3310                         continue;
3311
3312                 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3313                         goto nomem;
3314                 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3315                     zb[i].zb_objset) != 0) {
3316                         nvlist_free(nv);
3317                         goto nomem;
3318                 }
3319                 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3320                     zb[i].zb_object) != 0) {
3321                         nvlist_free(nv);
3322                         goto nomem;
3323                 }
3324                 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3325                         nvlist_free(nv);
3326                         goto nomem;
3327                 }
3328                 nvlist_free(nv);
3329         }
3330
3331         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3332         return (0);
3333
3334 nomem:
3335         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3336         return (no_memory(zhp->zpool_hdl));
3337 }
3338
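/*
 * A usage sketch for zpool_get_errlog(): walking the uniquified error
 * list.  Each element is an nvlist holding the dataset and object
 * numbers recorded above.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			(void) printf("<0x%llx>:<0x%llx>\n",
 *			    (u_longlong_t)dsobj, (u_longlong_t)obj);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */
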
3339 /*
3340  * Upgrade a ZFS pool to the latest on-disk version.
3341  */
3342 int
3343 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3344 {
3345         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3346         libzfs_handle_t *hdl = zhp->zpool_hdl;
3347
3348         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3349         zc.zc_cookie = new_version;
3350
3351         if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3352                 return (zpool_standard_error_fmt(hdl, errno,
3353                     dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3354                     zhp->zpool_name));
3355         return (0);
3356 }
3357
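/*
 * A minimal sketch: upgrading a pool to the newest version this build
 * supports (SPA_VERSION is the current on-disk version constant).
 *
 *	if (zpool_upgrade(zhp, SPA_VERSION) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */
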
3358 void
3359 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3360     char *history_str)
3361 {
3362         int i;
3363
3364         (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3365         for (i = 1; i < argc; i++) {
3366                 if (strlen(history_str) + 1 + strlen(argv[i]) >
3367                     HIS_MAX_RECORD_LEN)
3368                         break;
3369                 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3370                 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3371         }
3372 }
3373
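/*
 * For example (hypothetical invocation): given subcommand "create" and
 * argv of { "prog", "-f", "tank", "sda" }, the staged string becomes
 * "create -f tank sda".  argv[0] is skipped (replaced by 'subcommand'),
 * and any argument that would overflow HIS_MAX_RECORD_LEN ends the
 * string early.
 */
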
3374 /*
3375  * Stage command history for logging.
3376  */
3377 int
3378 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3379 {
3380         if (history_str == NULL)
3381                 return (EINVAL);
3382
3383         if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3384                 return (EINVAL);
3385
3386         /* free(NULL) is a no-op, so no need to test first */
3387         free(hdl->libzfs_log_str);
3388
3389         if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3390                 return (no_memory(hdl));
3391
3392         return (0);
3393 }
3394
3395 /*
3396  * Perform an ioctl to retrieve part of the command history of a pool.
3397  *
3398  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3399  * logical offset of the history buffer to start reading from.
3400  *
3401  * Upon return, 'off' is the next logical offset to read from and
3402  * 'len' is the actual number of bytes read into 'buf'.
3403  */
3404 static int
3405 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3406 {
3407         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3408         libzfs_handle_t *hdl = zhp->zpool_hdl;
3409
3410         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3411
3412         zc.zc_history = (uint64_t)(uintptr_t)buf;
3413         zc.zc_history_len = *len;
3414         zc.zc_history_offset = *off;
3415
3416         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3417                 switch (errno) {
3418                 case EPERM:
3419                         return (zfs_error_fmt(hdl, EZFS_PERM,
3420                             dgettext(TEXT_DOMAIN,
3421                             "cannot show history for pool '%s'"),
3422                             zhp->zpool_name));
3423                 case ENOENT:
3424                         return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3425                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
3426                             "'%s'"), zhp->zpool_name));
3427                 case ENOTSUP:
3428                         return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3429                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
3430                             "'%s', pool must be upgraded"), zhp->zpool_name));
3431                 default:
3432                         return (zpool_standard_error_fmt(hdl, errno,
3433                             dgettext(TEXT_DOMAIN,
3434                             "cannot get history for '%s'"), zhp->zpool_name));
3435                 }
3436         }
3437
3438         *len = zc.zc_history_len;
3439         *off = zc.zc_history_offset;
3440
3441         return (0);
3442 }
3443
3444 /*
3445  * Process the buffer of nvlists, unpacking and storing each nvlist record
3446  * into 'records'.  'leftover' is set to the number of trailing bytes that
3447  * were not processed because they do not form a complete record.
3448  */
3449 int
3450 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3451     nvlist_t ***records, uint_t *numrecords)
3452 {
3453         uint64_t reclen;
3454         nvlist_t *nv;
3455         int i;
3456
3457         while (bytes_read > sizeof (reclen)) {
3458
3459                 /* get length of packed record (stored as little endian) */
3460                 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3461                         reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3462
3463                 if (bytes_read < sizeof (reclen) + reclen)
3464                         break;
3465
3466                 /* unpack record */
3467                 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3468                         return (ENOMEM);
3469                 bytes_read -= sizeof (reclen) + reclen;
3470                 buf += sizeof (reclen) + reclen;
3471
3472                 /* add record to nvlist array */
3473                 (*numrecords)++;
3474                 if (ISP2(*numrecords + 1)) {
3475                         nvlist_t **tmp = realloc(*records,
3476                             *numrecords * 2 * sizeof (nvlist_t *));
3477                         if (tmp == NULL) {
3478                                 /* keep *records valid so the caller can free it */
3479                                 nvlist_free(nv);
3480                                 (*numrecords)--;
3481                                 return (ENOMEM);
3482                         }
3483                         *records = tmp;
3484                 }
3485                 (*records)[*numrecords - 1] = nv;
3479         }
3480
3481         *leftover = bytes_read;
3482         return (0);
3483 }
3484
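/*
 * Informally, the packed history buffer decoded above is a sequence of
 * length-prefixed records:
 *
 *	+---------------+------------------+---------------+-----
 *	| reclen (8 LE) | packed nvlist    | reclen (8 LE) | ...
 *	+---------------+------------------+---------------+-----
 *
 * where each 'reclen' is a 64-bit little-endian count of the bytes in
 * the packed nvlist that follows it.
 */
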
3485 #define HIS_BUF_LEN     (128*1024)
3486
3487 /*
3488  * Retrieve the command history of a pool.
3489  */
3490 int
3491 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3492 {
3493         char buf[HIS_BUF_LEN];
3494         uint64_t off = 0;
3495         nvlist_t **records = NULL;
3496         uint_t numrecords = 0;
3497         int err, i;
3498
3499         do {
3500                 uint64_t bytes_read = sizeof (buf);
3501                 uint64_t leftover;
3502
3503                 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3504                         break;
3505
3506                 /* if nothing else was read in, we're at EOF, just return */
3507                 if (!bytes_read)
3508                         break;
3509
3510                 if ((err = zpool_history_unpack(buf, bytes_read,
3511                     &leftover, &records, &numrecords)) != 0)
3512                         break;
3513                 off -= leftover;
3514
3515                 /* CONSTCOND */
3516         } while (1);
3517
3518         if (!err) {
3519                 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3520                 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3521                     records, numrecords) == 0);
3522         }
3523         for (i = 0; i < numrecords; i++)
3524                 nvlist_free(records[i]);
3525         free(records);
3526
3527         return (err);
3528 }
3529
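/*
 * A usage sketch for zpool_get_history(): dumping the raw command
 * strings.  ZPOOL_HIST_CMD is only present in command records, so
 * records without it (e.g. internal events) are simply skipped.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */
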
3530 /*
3531  * Retrieve the next event.  If there is a new event available 'nvp' will
3532  * contain a newly allocated nvlist and 'dropped' will be set to the number
3533  * of missed events since the last call to this function.  When 'nvp' is
3534  * set to NULL it indicates no new events are available.  In either case
3535  * the function returns 0 and it is up to the caller to free 'nvp'.  In
3536  * the case of a fatal error the function will return a non-zero value.
3537  * When the function is called in blocking mode it will not return until
3538  * a new event is available.
3539  */
3540 int
3541 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3542     int *dropped, int block, int cleanup_fd)
3543 {
3544         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3545         int error = 0;
3546
3547         *nvp = NULL;
3548         *dropped = 0;
3549         zc.zc_cleanup_fd = cleanup_fd;
3550
3551         if (!block)
3552                 zc.zc_guid = ZEVENT_NONBLOCK;
3553
3554         if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3555                 return (-1);
3556
3557 retry:
3558         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3559                 switch (errno) {
3560                 case ESHUTDOWN:
3561                         error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3562                             dgettext(TEXT_DOMAIN, "zfs shutdown"));
3563                         goto out;
3564                 case ENOENT:
3565                         /* Blocking error case should not occur */
3566                         if (block)
3567                                 error = zpool_standard_error_fmt(hdl, errno,
3568                                     dgettext(TEXT_DOMAIN, "cannot get event"));
3569
3570                         goto out;
3571                 case ENOMEM:
3572                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3573                                 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3574                                     dgettext(TEXT_DOMAIN, "cannot get event"));
3575                                 goto out;
3576                         } else {
3577                                 goto retry;
3578                         }
3579                 default:
3580                         error = zpool_standard_error_fmt(hdl, errno,
3581                             dgettext(TEXT_DOMAIN, "cannot get event"));
3582                         goto out;
3583                 }
3584         }
3585
3586         error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3587         if (error != 0)
3588                 goto out;
3589
3590         *dropped = (int)zc.zc_cookie;
3591 out:
3592         zcmd_free_nvlists(&zc);
3593
3594         return (error);
3595 }
3596
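/*
 * A usage sketch for zpool_events_next(): draining all pending events
 * without blocking.  The cleanup_fd is assumed to be a descriptor on
 * ZFS_DEV, which the kernel uses to track this consumer's position in
 * the event stream.
 *
 *	int cleanup_fd, dropped;
 *	nvlist_t *nvl;
 *
 *	if ((cleanup_fd = open(ZFS_DEV, O_RDWR)) != -1) {
 *		while (zpool_events_next(hdl, &nvl, &dropped, 0,
 *		    cleanup_fd) == 0 && nvl != NULL) {
 *			if (dropped > 0)
 *				(void) printf("missed %d events\n", dropped);
 *			nvlist_print(stdout, nvl);
 *			nvlist_free(nvl);
 *		}
 *		(void) close(cleanup_fd);
 *	}
 */
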
3597 /*
3598  * Clear all events.
3599  */
3600 int
3601 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3602 {
3603         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3604         char msg[1024];
3605
3606         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3607             "cannot clear events"));
3608
3609         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3610                 return (zpool_standard_error_fmt(hdl, errno, msg));
3611
3612         if (count != NULL)
3613                 *count = (int)zc.zc_cookie; /* # of events cleared */
3614
3615         return (0);
3616 }
3617
3618 void
3619 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3620     char *pathname, size_t len)
3621 {
3622         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3623         boolean_t mounted = B_FALSE;
3624         char *mntpnt = NULL;
3625         char dsname[MAXNAMELEN];
3626
3627         if (dsobj == 0) {
3628                 /* special case for the MOS */
3629                 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
3630                     (longlong_t)obj);
3631                 return;
3631         }
3632
3633         /* get the dataset's name */
3634         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3635         zc.zc_obj = dsobj;
3636         if (ioctl(zhp->zpool_hdl->libzfs_fd,
3637             ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3638                 /* just write out a path of two object numbers */
3639                 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3640                     (longlong_t)dsobj, (longlong_t)obj);
3641                 return;
3642         }
3643         (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3644
3645         /* find out if the dataset is mounted */
3646         mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3647
3648         /* get the corrupted object's path */
3649         (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3650         zc.zc_obj = obj;
3651         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3652             &zc) == 0) {
3653                 if (mounted) {
3654                         (void) snprintf(pathname, len, "%s%s", mntpnt,
3655                             zc.zc_value);
3656                 } else {
3657                         (void) snprintf(pathname, len, "%s:%s",
3658                             dsname, zc.zc_value);
3659                 }
3660         } else {
3661                 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
3662                     (longlong_t)obj);
3662         }
3663         free(mntpnt);
3664 }
3665
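/*
 * A minimal sketch pairing zpool_obj_to_path() with the error log:
 * 'dsobj' and 'obj' are assumed to come from a zpool_get_errlog()
 * entry (ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT).
 *
 *	char pathname[MAXPATHLEN];
 *
 *	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
 *	(void) printf("%s\n", pathname);
 */
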
3666 /*
3667  * Read the EFI label from the config, if a label does not exist then
3668  * pass back the error to the caller. If the caller has passed a non-NULL
3669  * diskaddr argument then we set it to the starting address of the EFI
3670  * partition.
3671  */
3672 static int
3673 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3674 {
3675         char *path;
3676         int fd;
3677         char diskname[MAXPATHLEN];
3678         int err = -1;
3679
3680         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3681                 return (err);
3682
3683         (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3684             strrchr(path, '/'));
3685         if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
3686                 struct dk_gpt *vtoc;
3687
3688                 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3689                         if (sb != NULL)
3690                                 *sb = vtoc->efi_parts[0].p_start;
3691                         efi_free(vtoc);
3692                 }
3693                 (void) close(fd);
3694         }
3695         return (err);
3696 }
3697
3698 /*
3699  * Determine where a partition starts on a disk in the current
3700  * configuration.
3701  */
3702 static diskaddr_t
3703 find_start_block(nvlist_t *config)
3704 {
3705         nvlist_t **child;
3706         uint_t c, children;
3707         diskaddr_t sb = MAXOFFSET_T;
3708         uint64_t wholedisk;
3709
3710         if (nvlist_lookup_nvlist_array(config,
3711             ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3712                 if (nvlist_lookup_uint64(config,
3713                     ZPOOL_CONFIG_WHOLE_DISK,
3714                     &wholedisk) != 0 || !wholedisk) {
3715                         return (MAXOFFSET_T);
3716                 }
3717                 if (read_efi_label(config, &sb) < 0)
3718                         sb = MAXOFFSET_T;
3719                 return (sb);
3720         }
3721
3722         for (c = 0; c < children; c++) {
3723                 sb = find_start_block(child[c]);
3724                 if (sb != MAXOFFSET_T) {
3725                         return (sb);
3726                 }
3727         }
3728         return (MAXOFFSET_T);
3729 }
3730
3731 int
3732 zpool_label_disk_wait(char *path, int timeout)
3733 {
3734         struct stat64 statbuf;
3735         int i;
3736
3737         /*
3738          * Wait 'timeout' milliseconds for a newly created device to be available
3739          * from the given path.  There is a small window when a /dev/ device
3740          * will exist and the udev link will not, so we must wait for the
3741          * symlink.  Depending on the udev rules this may take a few seconds.
3742          */
3743         for (i = 0; i < timeout; i++) {
3744                 usleep(1000);
3745
3746                 errno = 0;
3747                 if ((stat64(path, &statbuf) == 0) && (errno == 0))
3748                         return (0);
3749         }
3750
3751         return (ENOENT);
3752 }
3753
3754 int
3755 zpool_label_disk_check(char *path)
3756 {
3757         struct dk_gpt *vtoc;
3758         int fd, err;
3759
3760         if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
3761                 return (errno);
3762
3763         if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
3764                 (void) close(fd);
3765                 return (err);
3766         }
3767
3768         if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
3769                 efi_free(vtoc);
3770                 (void) close(fd);
3771                 return (EIDRM);
3772         }
3773
3774         efi_free(vtoc);
3775         (void) close(fd);
3776         return (0);
3777 }
3778
3779 /*
3780  * Label an individual disk.  The name provided is the short name,
3781  * stripped of any leading /dev path.
3782  */
3783 int
3784 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3785 {
3786         char path[MAXPATHLEN];
3787         struct dk_gpt *vtoc;
3788         int rval, fd;
3789         size_t resv = EFI_MIN_RESV_SIZE;
3790         uint64_t slice_size;
3791         diskaddr_t start_block;
3792         char errbuf[1024];
3793
3794         /* prepare an error message just in case */
3795         (void) snprintf(errbuf, sizeof (errbuf),
3796             dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3797
3798         if (zhp) {
3799                 nvlist_t *nvroot;
3800
3801                 if (pool_is_bootable(zhp)) {
3802                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3803                             "EFI labeled devices are not supported on root "
3804                             "pools."));
3805                         return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3806                 }
3807
3808                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3809                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3810
3811                 if (zhp->zpool_start_block == 0)
3812                         start_block = find_start_block(nvroot);
3813                 else
3814                         start_block = zhp->zpool_start_block;
3815                 zhp->zpool_start_block = start_block;
3816         } else {
3817                 /* new pool */
3818                 start_block = NEW_START_BLOCK;
3819         }
3820
3821         (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3822             BACKUP_SLICE);
3823
3824         if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
3825                 /*
3826                  * This shouldn't happen.  We've long since verified that this
3827                  * is a valid device.
3828                  */
3829                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3830                     "label '%s': unable to open device: %d"), path, errno);
3831                 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3832         }
3833
3834         if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3835                 /*
3836                  * The only way this can fail is if we run out of memory, or we
3837                  * are unable to read the disk's capacity.
3838                  */
3839                 if (errno == ENOMEM)
3840                         (void) no_memory(hdl);
3841
3842                 (void) close(fd);
3843                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3844                     "label '%s': unable to read disk capacity"), path);
3845
3846                 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3847         }
3848
3849         slice_size = vtoc->efi_last_u_lba + 1;
3850         slice_size -= EFI_MIN_RESV_SIZE;
3851         if (start_block == MAXOFFSET_T)
3852                 start_block = NEW_START_BLOCK;
3853         slice_size -= start_block;
3854         slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
3855
3856         vtoc->efi_parts[0].p_start = start_block;
3857         vtoc->efi_parts[0].p_size = slice_size;
3858
3859         /*
3860          * Why we use V_USR: V_BACKUP confuses users, and is considered
3861          * disposable by some EFI utilities (since EFI doesn't have a backup
3862          * slice).  V_UNASSIGNED is supposed to be used only for zero size
3863          * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3864          * etc. were all pretty specific.  V_USR is as close to reality as we
3865          * can get, in the absence of V_OTHER.
3866          */
3867         vtoc->efi_parts[0].p_tag = V_USR;
3868         (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3869
3870         vtoc->efi_parts[8].p_start = slice_size + start_block;
3871         vtoc->efi_parts[8].p_size = resv;
3872         vtoc->efi_parts[8].p_tag = V_RESERVED;
3873
3874         if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
3875                 /*
3876                  * Some block drivers (like pcata) may not support EFI
3877                  * GPT labels.  Print out a helpful error message
3878                  * directing the user to manually label the disk and
3879                  * give a specific slice.
3880                  */
3881                 (void) close(fd);
3882                 efi_free(vtoc);
3883
3884                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
3885                     "parted(8) and then provide a specific slice: %d"), rval);
3886                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3887         }
3888
3889         (void) close(fd);
3890         efi_free(vtoc);
3891
3892         /* Wait for the first expected slice to appear. */
3893         (void) snprintf(path, sizeof (path), "%s/%s%s%s", DISK_ROOT, name,
3894             isdigit(name[strlen(name)-1]) ? "p" : "", FIRST_SLICE);
3895         rval = zpool_label_disk_wait(path, 3000);
3896         if (rval) {
3897                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
3898                     "detect device partitions on '%s': %d"), path, rval);
3899                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3900         }
3901
3902         /* We can't be too paranoid.  Read the label back and verify it. */
3903         (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
3904         rval = zpool_label_disk_check(path);
3905         if (rval) {
3906                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
3907                     "EFI label on '%s' is damaged.  Ensure\nthis device "
3908                     "is not in use, and is functioning properly: %d"),
3909                     path, rval);
3910                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3911         }
3912
3913         return (0);
3914 }
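
/*
 * A minimal sketch for zpool_label_disk(); the short device name
 * ("sdb", an assumption for the example) is given relative to
 * DISK_ROOT as described above.
 *
 *	if (zpool_label_disk(hdl, zhp, "sdb") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */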