Use the right device path when relabeling.
[zfs.git] / lib / libzfs / libzfs_pool.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
        int create:1;   /* Validate property on creation */
        int import:1;   /* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        libzfs_handle_t *hdl = zhp->zpool_hdl;

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
                return (-1);

        while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
                if (errno == ENOMEM) {
                        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                                zcmd_free_nvlists(&zc);
                                return (-1);
                        }
                } else {
                        zcmd_free_nvlists(&zc);
                        return (-1);
                }
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
                zcmd_free_nvlists(&zc);
                return (-1);
        }

        zcmd_free_nvlists(&zc);

        return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
        nvlist_t *old_props;

        old_props = zhp->zpool_props;

        if (zpool_get_all_props(zhp) != 0)
                return (-1);

        nvlist_free(old_props);
        return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
        nvlist_t *nv, *nvl;
        uint64_t ival;
        char *value;
        zprop_source_t source;

        nvl = zhp->zpool_props;
        if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
                source = ival;
                verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
        } else {
                source = ZPROP_SRC_DEFAULT;
                if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
                        value = "-";
        }

        if (src)
                *src = source;

        return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
        nvlist_t *nv, *nvl;
        uint64_t value;
        zprop_source_t source;

        if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
                /*
                 * zpool_get_all_props() has most likely failed because
                 * the pool is faulted, but if all we need is the top level
                 * vdev's guid then get it from the zhp config nvlist.
                 */
                if ((prop == ZPOOL_PROP_GUID) &&
                    (nvlist_lookup_nvlist(zhp->zpool_config,
                    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
                    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
                    == 0)) {
                        return (value);
                }
                return (zpool_prop_default_numeric(prop));
        }

        nvl = zhp->zpool_props;
        if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
                verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
                source = value;
                verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
        } else {
                source = ZPROP_SRC_DEFAULT;
                value = zpool_prop_default_numeric(prop);
        }

        if (src)
                *src = source;

        return (value);
}
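
/*
 * Illustrative sketch (editorial, not compiled): how a libzfs consumer
 * might read a numeric pool property via zpool_get_prop_int().  The
 * function name and printf formatting are example choices.
 */
#if 0
static void
example_print_version(zpool_handle_t *zhp)
{
        zprop_source_t src;
        uint64_t version;

        /* Falls back to the default value if the props can't be read. */
        version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
        (void) printf("pool version: %llu (%s)\n", (u_longlong_t)version,
            src == ZPROP_SRC_DEFAULT ? "default" : "local");
}
#endif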

/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
        switch (state) {
        default:
                break;
        case VDEV_STATE_CLOSED:
        case VDEV_STATE_OFFLINE:
                return (gettext("OFFLINE"));
        case VDEV_STATE_REMOVED:
                return (gettext("REMOVED"));
        case VDEV_STATE_CANT_OPEN:
                if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
                        return (gettext("FAULTED"));
                else if (aux == VDEV_AUX_SPLIT_POOL)
                        return (gettext("SPLIT"));
                else
                        return (gettext("UNAVAIL"));
        case VDEV_STATE_FAULTED:
                return (gettext("FAULTED"));
        case VDEV_STATE_DEGRADED:
                return (gettext("DEGRADED"));
        case VDEV_STATE_HEALTHY:
                return (gettext("ONLINE"));
        }

        return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
        uint64_t intval;
        const char *strval;
        zprop_source_t src = ZPROP_SRC_NONE;
        nvlist_t *nvroot;
        vdev_stat_t *vs;
        uint_t vsc;

        if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
                switch (prop) {
                case ZPOOL_PROP_NAME:
                        (void) strlcpy(buf, zpool_get_name(zhp), len);
                        break;

                case ZPOOL_PROP_HEALTH:
                        (void) strlcpy(buf, "FAULTED", len);
                        break;

                case ZPOOL_PROP_GUID:
                        intval = zpool_get_prop_int(zhp, prop, &src);
                        (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
                        break;

                case ZPOOL_PROP_ALTROOT:
                case ZPOOL_PROP_CACHEFILE:
                        if (zhp->zpool_props != NULL ||
                            zpool_get_all_props(zhp) == 0) {
                                (void) strlcpy(buf,
                                    zpool_get_prop_string(zhp, prop, &src),
                                    len);
                                if (srctype != NULL)
                                        *srctype = src;
                                return (0);
                        }
                        /* FALLTHROUGH */
                default:
                        (void) strlcpy(buf, "-", len);
                        break;
                }

                if (srctype != NULL)
                        *srctype = src;
                return (0);
        }

        if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
            prop != ZPOOL_PROP_NAME)
                return (-1);

        switch (zpool_prop_get_type(prop)) {
        case PROP_TYPE_STRING:
                (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
                    len);
                break;

        case PROP_TYPE_NUMBER:
                intval = zpool_get_prop_int(zhp, prop, &src);

                switch (prop) {
                case ZPOOL_PROP_SIZE:
                case ZPOOL_PROP_ALLOCATED:
                case ZPOOL_PROP_FREE:
                case ZPOOL_PROP_ASHIFT:
                        (void) zfs_nicenum(intval, buf, len);
                        break;

                case ZPOOL_PROP_CAPACITY:
                        (void) snprintf(buf, len, "%llu%%",
                            (u_longlong_t)intval);
                        break;

                case ZPOOL_PROP_DEDUPRATIO:
                        (void) snprintf(buf, len, "%llu.%02llux",
                            (u_longlong_t)(intval / 100),
                            (u_longlong_t)(intval % 100));
                        break;

                case ZPOOL_PROP_HEALTH:
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                        verify(nvlist_lookup_uint64_array(nvroot,
                            ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
                            == 0);

                        (void) strlcpy(buf, zpool_state_to_name(intval,
                            vs->vs_aux), len);
                        break;
                default:
                        (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
                }
                break;

        case PROP_TYPE_INDEX:
                intval = zpool_get_prop_int(zhp, prop, &src);
                if (zpool_prop_index_to_string(prop, intval, &strval)
                    != 0)
                        return (-1);
                (void) strlcpy(buf, strval, len);
                break;

        default:
                abort();
        }

        if (srctype)
                *srctype = src;

        return (0);
}
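
/*
 * Illustrative sketch (editorial, not compiled): fetching a formatted
 * property value into a caller-supplied buffer with zpool_get_prop().
 * The buffer size and output format are example choices.
 */
#if 0
static void
example_print_health(zpool_handle_t *zhp)
{
        char buf[ZFS_MAXPROPLEN];

        if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
            NULL) == 0)
                (void) printf("%s: %s\n", zpool_get_name(zhp), buf);
}
#endif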

/*
 * Check that the bootfs dataset name lies within the pool on which it
 * is being set.  Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
        int len = strlen(pool);

        if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
                return (B_FALSE);

        if (strncmp(pool, bootfs, len) == 0 &&
            (bootfs[len] == '/' || bootfs[len] == '\0'))
                return (B_TRUE);

        return (B_FALSE);
}
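
/*
 * Example inputs for bootfs_name_valid() (illustrative only):
 *
 *      bootfs_name_valid("rpool", "rpool")             -> B_TRUE
 *      bootfs_name_valid("rpool", "rpool/ROOT/be")     -> B_TRUE
 *      bootfs_name_valid("rpool", "rpoolx/ROOT")       -> B_FALSE
 *
 * The last case fails because "rpoolx" merely shares a prefix with the
 * pool name; the character after the prefix must be '/' or '\0'.
 */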

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
        nvlist_t **child;
        uint_t c, children;

        if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return (read_efi_label(config, NULL) >= 0);

        for (c = 0; c < children; c++) {
                if (pool_uses_efi(child[c]))
                        return (B_TRUE);
        }
        return (B_FALSE);
}

static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
        char bootfs[ZPOOL_MAXNAMELEN];

        return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
            sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
            sizeof (bootfs)) != 0);
}

/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc.) if they
 * are specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
        nvpair_t *elem;
        nvlist_t *retprops;
        zpool_prop_t prop;
        char *strval;
        uint64_t intval;
        char *slash;
        struct stat64 statbuf;
        zpool_handle_t *zhp;
        nvlist_t *nvroot;

        if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
                (void) no_memory(hdl);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                const char *propname = nvpair_name(elem);

                /*
                 * Make sure this property is valid and applies to this type.
                 */
                if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "invalid property '%s'"), propname);
                        (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                        goto error;
                }

                if (zpool_prop_readonly(prop)) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                            "is readonly"), propname);
                        (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
                        goto error;
                }

                if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
                    &strval, &intval, errbuf) != 0)
                        goto error;

                /*
                 * Perform additional checking for specific properties.
                 */
                switch (prop) {
                default:
                        break;
                case ZPOOL_PROP_VERSION:
                        if (intval < version || intval > SPA_VERSION) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' number %llu is invalid."),
                                    propname, (u_longlong_t)intval);
                                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_ASHIFT:
                        if (!flags.create) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set at "
                                    "creation time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (intval != 0 && (intval < 9 || intval > 13)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' number %llu is invalid."),
                                    propname, (u_longlong_t)intval);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_BOOTFS:
                        if (flags.create || flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' cannot be set at creation "
                                    "or import time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (version < SPA_VERSION_BOOTFS) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "pool must be upgraded to support "
                                    "'%s' property"), propname);
                                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                                goto error;
                        }

                        /*
                         * The bootfs property value must be a dataset name,
                         * and the dataset must reside in the pool on which
                         * it is being set.
                         */
                        if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                            strval)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                                    "is an invalid name"), strval);
                                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
                                goto error;
                        }

                        if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "could not open pool '%s'"), poolname);
                                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
                                goto error;
                        }
                        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
                        /*
                         * bootfs property cannot be set on a disk which has
                         * been EFI labeled.
                         */
                        if (pool_uses_efi(nvroot)) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' not supported on "
                                    "EFI labeled devices"), propname);
                                (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
                                zpool_close(zhp);
                                goto error;
                        }
#endif
                        zpool_close(zhp);
                        break;

                case ZPOOL_PROP_ALTROOT:
                        if (!flags.create && !flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set during pool "
                                    "creation or import"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }

                        if (strval[0] != '/') {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "bad alternate root '%s'"), strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        if (strval[0] == '\0')
                                break;

                        if (strcmp(strval, "none") == 0)
                                break;

                        if (strval[0] != '/') {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' must be empty, an "
                                    "absolute path, or 'none'"), propname);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        slash = strrchr(strval, '/');

                        if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                            strcmp(slash, "/..") == 0) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' is not a valid file"), strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        *slash = '\0';

                        if (strval[0] != '\0' &&
                            (stat64(strval, &statbuf) != 0 ||
                            !S_ISDIR(statbuf.st_mode))) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "'%s' is not a valid directory"),
                                    strval);
                                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                                goto error;
                        }

                        *slash = '/';
                        break;

                case ZPOOL_PROP_READONLY:
                        if (!flags.import) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "property '%s' can only be set at "
                                    "import time"), propname);
                                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                                goto error;
                        }
                        break;
                }
        }

        return (retprops);
error:
        nvlist_free(retprops);
        return (NULL);
}
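
/*
 * Illustrative sketch (editorial, not compiled): the shape of a property
 * nvlist as a caller might hand it to zpool_valid_proplist() at creation
 * time.  All values arrive as strings; zprop_parse_value() above converts
 * numeric and index properties.  The property choices are examples, and a
 * real caller would free both nvlists when done.
 */
#if 0
static nvlist_t *
example_create_props(libzfs_handle_t *hdl, const char *poolname, char *errbuf)
{
        nvlist_t *props = NULL;
        prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

        verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(props, "ashift", "12") == 0);
        verify(nvlist_add_string(props, "autoexpand", "on") == 0);

        /* Returns a new nvlist with parsed values, or NULL on error. */
        return (zpool_valid_proplist(hdl, poolname, props, SPA_VERSION,
            flags, errbuf));
}
#endif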

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int ret = -1;
        char errbuf[1024];
        nvlist_t *nvl = NULL;
        nvlist_t *realprops;
        uint64_t version;
        prop_flags_t flags = { 0 };

        (void) snprintf(errbuf, sizeof (errbuf),
            dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
            zhp->zpool_name);

        if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
                return (no_memory(zhp->zpool_hdl));

        if (nvlist_add_string(nvl, propname, propval) != 0) {
                nvlist_free(nvl);
                return (no_memory(zhp->zpool_hdl));
        }

        version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
        if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
            zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
                nvlist_free(nvl);
                return (-1);
        }

        nvlist_free(nvl);
        nvl = realprops;

        /*
         * Execute the corresponding ioctl() to set this property.
         */
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
                nvlist_free(nvl);
                return (-1);
        }

        ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

        zcmd_free_nvlists(&zc);
        nvlist_free(nvl);

        if (ret)
                (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
        else
                (void) zpool_props_refresh(zhp);

        return (ret);
}
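
/*
 * Illustrative sketch (editorial, not compiled): setting a pool property
 * by name.  The property/value pair is an example choice.
 */
#if 0
static int
example_enable_autoexpand(zpool_handle_t *zhp)
{
        /* Validates the pair, issues the ioctl, and refreshes props. */
        return (zpool_set_prop(zhp, "autoexpand", "on"));
}
#endif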

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        zprop_list_t *entry;
        char buf[ZFS_MAXPROPLEN];

        if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
                return (-1);

        for (entry = *plp; entry != NULL; entry = entry->pl_next) {

                if (entry->pl_fixed)
                        continue;

                if (entry->pl_prop != ZPROP_INVAL &&
                    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
                    NULL) == 0) {
                        if (strlen(buf) > entry->pl_width)
                                entry->pl_width = strlen(buf);
                }
        }

        return (0);
}

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, while other vendors prefer
 * a 1m alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define NEW_START_BLOCK         2048
#define PARTITION_END_ALIGNMENT 2048
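
/*
 * Alignment arithmetic (editorial note): with 512-byte sectors,
 * 2048 sectors * 512 B = 1 MiB, so both the start block and the
 * partition-end alignment above land on 1 MiB boundaries.  The
 * assertion below is a C11-style sketch, kept out of the build.
 */
#if 0
_Static_assert(NEW_START_BLOCK * 512 == 1024 * 1024,
    "first slice starts 1 MiB into the device");
#endif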

/*
 * Validate the given pool name, optionally setting an extended error
 * message on 'hdl' describing why the name is invalid.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
        namecheck_err_t why;
        char what;
        int ret;

        ret = pool_namecheck(pool, &why, &what);

        /*
         * The rules for reserved pool names were extended at a later point.
         * But we need to support users with existing pools that may now be
         * invalid.  So we only check for this expanded set of names during a
         * create (or import), and only in userland.
         */
        if (ret == 0 && !isopen &&
            (strncmp(pool, "mirror", 6) == 0 ||
            strncmp(pool, "raidz", 5) == 0 ||
            strncmp(pool, "spare", 5) == 0 ||
            strcmp(pool, "log") == 0)) {
                if (hdl != NULL)
                        zfs_error_aux(hdl,
                            dgettext(TEXT_DOMAIN, "name is reserved"));
                return (B_FALSE);
        }

        if (ret != 0) {
                if (hdl != NULL) {
                        switch (why) {
                        case NAME_ERR_TOOLONG:
                                zfs_error_aux(hdl,
                                    dgettext(TEXT_DOMAIN, "name is too long"));
                                break;

                        case NAME_ERR_INVALCHAR:
                                zfs_error_aux(hdl,
                                    dgettext(TEXT_DOMAIN, "invalid character "
                                    "'%c' in pool name"), what);
                                break;

                        case NAME_ERR_NOLETTER:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "name must begin with a letter"));
                                break;

                        case NAME_ERR_RESERVED:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "name is reserved"));
                                break;

                        case NAME_ERR_DISKLIKE:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "pool name is reserved"));
                                break;

                        case NAME_ERR_LEADING_SLASH:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "leading slash in name"));
                                break;

                        case NAME_ERR_EMPTY_COMPONENT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "empty component in name"));
                                break;

                        case NAME_ERR_TRAILING_SLASH:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "trailing slash in name"));
                                break;

                        case NAME_ERR_MULTIPLE_AT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "multiple '@' delimiters in name"));
                                break;

                        case NAME_ERR_NO_AT:
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "permission set is missing '@'"));
                                break;
                        }
                }
                return (B_FALSE);
        }

        return (B_TRUE);
}
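
/*
 * Illustrative sketch (editorial, not compiled): pre-checking a pool name
 * before attempting a create.  The error reporting here is an example
 * choice; libzfs records the detail on the handle either way.
 */
#if 0
static boolean_t
example_check_name(libzfs_handle_t *hdl, const char *name)
{
        /* B_FALSE for isopen: apply the stricter create-time rules. */
        if (!zpool_name_valid(hdl, B_FALSE, name)) {
                (void) fprintf(stderr, "invalid pool name '%s'\n", name);
                return (B_FALSE);
        }
        return (B_TRUE);
}
#endif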

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
        zpool_handle_t *zhp;
        boolean_t missing;

        /*
         * Make sure the pool name is valid.
         */
        if (!zpool_name_valid(hdl, B_TRUE, pool)) {
                (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                    pool);
                return (NULL);
        }

        if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
                return (NULL);

        zhp->zpool_hdl = hdl;
        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if (zpool_refresh_stats(zhp, &missing) != 0) {
                zpool_close(zhp);
                return (NULL);
        }

        if (missing) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
                (void) zfs_error_fmt(hdl, EZFS_NOENT,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
        zpool_handle_t *zhp;
        boolean_t missing;

        if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
                return (-1);

        zhp->zpool_hdl = hdl;
        (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

        if (zpool_refresh_stats(zhp, &missing) != 0) {
                zpool_close(zhp);
                return (-1);
        }

        if (missing) {
                zpool_close(zhp);
                *ret = NULL;
                return (0);
        }

        *ret = zhp;
        return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
                return (NULL);

        if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
                (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
                    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
                zpool_close(zhp);
                return (NULL);
        }

        return (zhp);
}
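
/*
 * Illustrative sketch (editorial, not compiled): the usual open/use/close
 * pattern for a pool handle.  The handle from libzfs_init() and the pool
 * name are assumed inputs.
 */
#if 0
static void
example_with_pool(libzfs_handle_t *hdl, const char *name)
{
        zpool_handle_t *zhp;

        /* Fails (recording an error on hdl) if the pool is FAULTED. */
        if ((zhp = zpool_open(hdl, name)) == NULL)
                return;

        (void) printf("pool state: %d\n", zpool_get_state(zhp));
        zpool_close(zhp);
}
#endif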

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
        if (zhp->zpool_config)
                nvlist_free(zhp->zpool_config);
        if (zhp->zpool_old_config)
                nvlist_free(zhp->zpool_old_config);
        if (zhp->zpool_props)
                nvlist_free(zhp->zpool_props);
        free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
        return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
        return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        nvlist_t *zc_fsprops = NULL;
        nvlist_t *zc_props = NULL;
        char msg[1024];
        char *altroot;
        int ret = -1;

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot create '%s'"), pool);

        if (!zpool_name_valid(hdl, B_FALSE, pool))
                return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);

        if (props) {
                prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

                if ((zc_props = zpool_valid_proplist(hdl, pool, props,
                    SPA_VERSION_1, flags, msg)) == NULL) {
                        goto create_failed;
                }
        }

        if (fsprops) {
                uint64_t zoned;
                char *zonestr;

                zoned = ((nvlist_lookup_string(fsprops,
                    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
                    strcmp(zonestr, "on") == 0);

                if ((zc_fsprops = zfs_valid_proplist(hdl,
                    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
                        goto create_failed;
                }
                if (!zc_props &&
                    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
                        goto create_failed;
                }
                if (nvlist_add_nvlist(zc_props,
                    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
                        goto create_failed;
                }
        }

        if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
                goto create_failed;

        (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

        if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
                zcmd_free_nvlists(&zc);
                nvlist_free(zc_props);
                nvlist_free(zc_fsprops);

                switch (errno) {
                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.  This can also happen if the device is
                         * part of an active md or lvm device.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more vdevs refer to the same device, or one of\n"
                            "the devices is part of an active md or lvm device"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "one or more devices is less than the "
                                    "minimum size (%s)"), buf);
                        }
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case ENOSPC:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is out of space"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                case ENOTBLK:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "cache device must be a disk or disk slice"));
                        return (zfs_error(hdl, EZFS_BADDEV, msg));

                default:
                        return (zpool_standard_error(hdl, errno, msg));
                }
        }

        /*
         * If this is an alternate root pool, then we automatically set the
         * mountpoint of the root dataset to be '/'.
         */
        if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
            &altroot) == 0) {
                zfs_handle_t *zhp;

                verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
                verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
                    "/") == 0);

                zfs_close(zhp);
        }

create_failed:
        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);
        return (ret);
}
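
/*
 * Illustrative sketch (editorial, not compiled): a minimal single-disk
 * vdev tree of the shape zpool_create() expects in 'nvroot'.  Real
 * consumers build this through the zpool(8) vdev parser; the device path
 * and the whole_disk flag value here are example choices.
 */
#if 0
static nvlist_t *
example_single_disk_nvroot(const char *path)
{
        nvlist_t *root = NULL, *disk = NULL;

        verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_DISK) == 0);
        verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, path) == 0);
        verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 1ULL) == 0);

        verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
            &disk, 1) == 0);
        nvlist_free(disk);
        return (root);
}
#endif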

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        zfs_handle_t *zfp = NULL;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];

        if (zhp->zpool_state == POOL_STATE_ACTIVE &&
            (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
                return (-1);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
                (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
                    "cannot destroy '%s'"), zhp->zpool_name);

                if (errno == EROFS) {
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more devices is read only"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                } else {
                        (void) zpool_standard_error(hdl, errno, msg);
                }

                if (zfp)
                        zfs_close(zfp);
                return (-1);
        }

        if (zfp) {
                remove_mountpoint(zfp);
                zfs_close(zfp);
        }

        return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        int ret;
        libzfs_handle_t *hdl = zhp->zpool_hdl;
        char msg[1024];
        nvlist_t **spares, **l2cache;
        uint_t nspares, nl2cache;

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot add to '%s'"), zhp->zpool_name);

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_SPARES &&
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
                    "upgraded to add hot spares"));
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }

        if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
            ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
                uint64_t s;

                for (s = 0; s < nspares; s++) {
                        char *path;

                        if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
                            &path) == 0 && pool_uses_efi(spares[s])) {
                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device '%s' contains an EFI label and "
                                    "cannot be used on root pools."),
                                    zpool_vdev_name(hdl, NULL, spares[s],
                                    B_FALSE));
                                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
                        }
                }
        }

        if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
            SPA_VERSION_L2CACHE &&
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
            &l2cache, &nl2cache) == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
                    "upgraded to add cache devices"));
                return (zfs_error(hdl, EZFS_BADVERSION, msg));
        }

        if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
                return (-1);
        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

        if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
                switch (errno) {
                case EBUSY:
                        /*
                         * This can happen if the user has specified the same
                         * device multiple times.  We can't reliably detect this
                         * until we try to add it and see we already have a
                         * label.
                         */
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "one or more vdevs refer to the same device"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                case EOVERFLOW:
                        /*
                         * This occurs when one of the devices is below
                         * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
                         * device was the problem device since there's no
                         * reliable way to determine device size from userland.
                         */
                        {
                                char buf[64];

                                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                                    "device is less than the minimum "
                                    "size (%s)"), buf);
                        }
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                case ENOTSUP:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "pool must be upgraded to add these vdevs"));
                        (void) zfs_error(hdl, EZFS_BADVERSION, msg);
                        break;

                case EDOM:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "root pool cannot have multiple vdevs"
                            " or separate logs"));
                        (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
                        break;

                case ENOTBLK:
                        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                            "cache device must be a disk or disk slice"));
                        (void) zfs_error(hdl, EZFS_BADDEV, msg);
                        break;

                default:
                        (void) zpool_standard_error(hdl, errno, msg);
                }

                ret = -1;
        } else {
                ret = 0;
        }

        zcmd_free_nvlists(&zc);

        return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
        zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
        char msg[1024];

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot export '%s'"), zhp->zpool_name);

        (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
        zc.zc_cookie = force;
        zc.zc_guid = hardforce;

        if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
                switch (errno) {
                case EXDEV:
                        zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                            "use '-f' to override the following errors:\n"
                            "'%s' has an active shared spare which could be"
                            " used by other pools once '%s' is exported."),
                            zhp->zpool_name, zhp->zpool_name);
                        return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
                            msg));
                default:
                        return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
                            msg));
                }
        }

        return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
        return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
        return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
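
/*
 * Illustrative sketch (editorial, not compiled): exporting a pool,
 * falling back to a forced export if the plain attempt fails.  The
 * retry policy is an example choice, not library behavior.
 */
#if 0
static int
example_export(zpool_handle_t *zhp)
{
        if (zpool_export(zhp, B_FALSE) == 0)
                return (0);
        /* Caller has decided a forced export is acceptable here. */
        return (zpool_export(zhp, B_TRUE));
}
#endif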

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
        nvlist_t *nv = NULL;
        uint64_t rewindto;
        int64_t loss = -1;
        struct tm t;
        char timestr[128];

        if (!hdl->libzfs_printerr || config == NULL)
                return;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
                return;

        if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
                return;
        (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

        if (localtime_r((time_t *)&rewindto, &t) != NULL &&
            strftime(timestr, 128, "%c", &t) != 0) {
                if (dryrun) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "Would be able to return %s "
                            "to its state as of %s.\n"),
                            name, timestr);
                } else {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "Pool %s returned to its state as of %s.\n"),
                            name, timestr);
                }
                if (loss > 120) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "%s approximately %lld "),
                            dryrun ? "Would discard" : "Discarded",
                            ((longlong_t)loss + 30) / 60);
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "minutes of transactions.\n"));
                } else if (loss > 0) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "%s approximately %lld "),
                            dryrun ? "Would discard" : "Discarded",
                            (longlong_t)loss);
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "seconds of transactions.\n"));
                }
        }
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
        nvlist_t *nv = NULL;
        int64_t loss = -1;
        uint64_t edata = UINT64_MAX;
        uint64_t rewindto;
        struct tm t;
        char timestr[128];

        if (!hdl->libzfs_printerr)
                return;

        if (reason >= 0)
                (void) printf(dgettext(TEXT_DOMAIN, "action: "));
        else
                (void) printf(dgettext(TEXT_DOMAIN, "\t"));

        /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
            nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
                goto no_info;

        (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
            &edata);

        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery is possible, but will result in some data loss.\n"));

        if (localtime_r((time_t *)&rewindto, &t) != NULL &&
            strftime(timestr, 128, "%c", &t) != 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "\tReturning the pool to its state as of %s\n"
                    "\tshould correct the problem.  "),
                    timestr);
        } else {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "\tReverting the pool to an earlier state "
                    "should correct the problem.\n\t"));
        }

        if (loss > 120) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "Approximately %lld minutes of data\n"
                    "\tmust be discarded, irreversibly.  "),
                    ((longlong_t)loss + 30) / 60);
        } else if (loss > 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "Approximately %lld seconds of data\n"
                    "\tmust be discarded, irreversibly.  "),
                    (longlong_t)loss);
        }
        if (edata != 0 && edata != UINT64_MAX) {
                if (edata == 1) {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "After rewind, at least\n"
                            "\tone persistent user-data error will remain.  "));
                } else {
                        (void) printf(dgettext(TEXT_DOMAIN,
                            "After rewind, several\n"
                            "\tpersistent user-data errors will remain.  "));
                }
        }
        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
            reason >= 0 ? "clear" : "import", name);

        (void) printf(dgettext(TEXT_DOMAIN,
            "A scrub of the pool\n"
            "\tis strongly recommended after recovery.\n"));
        return;

no_info:
        (void) printf(dgettext(TEXT_DOMAIN,
            "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface.  It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
        nvlist_t *props = NULL;
        int ret;

        if (altroot != NULL) {
                if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
                        return (zfs_error_fmt(hdl, EZFS_NOMEM,
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            newname));
                }

                if (nvlist_add_string(props,
                    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
                    nvlist_add_string(props,
                    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
                        nvlist_free(props);
                        return (zfs_error_fmt(hdl, EZFS_NOMEM,
                            dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                            newname));
                }
        }

        ret = zpool_import_props(hdl, config, newname, props,
            ZFS_IMPORT_NORMAL);
        if (props)
                nvlist_free(props);
        return (ret);
}
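
/*
 * Illustrative sketch (editorial, not compiled): importing a discovered
 * pool under an alternate root.  'config' is assumed to come from
 * zpool_find_import(); the altroot path is an example choice, and a NULL
 * newname keeps the pool's original name.
 */
#if 0
static int
example_import(libzfs_handle_t *hdl, nvlist_t *config)
{
        return (zpool_import(hdl, config, NULL, (char *)"/mnt/recovery"));
}
#endif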
1422
1423 static void
1424 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1425     int indent)
1426 {
1427         nvlist_t **child;
1428         uint_t c, children;
1429         char *vname;
1430         uint64_t is_log = 0;
1431
1432         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1433             &is_log);
1434
1435         if (name != NULL)
1436                 (void) printf("\t%*s%s%s\n", indent, "", name,
1437                     is_log ? " [log]" : "");
1438
1439         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1440             &child, &children) != 0)
1441                 return;
1442
1443         for (c = 0; c < children; c++) {
1444                 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1445                 print_vdev_tree(hdl, vname, child[c], indent + 2);
1446                 free(vname);
1447         }
1448 }
1449
1450 /*
1451  * Import the given pool using the known configuration and a list of
1452  * properties to be set. The configuration should have come from
1453  * zpool_find_import(). The 'newname' parameter controls whether the pool
1454  * is imported with a different name.
1455  */
1456 int
1457 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1458     nvlist_t *props, int flags)
1459 {
1460         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
1461         zpool_rewind_policy_t policy;
1462         nvlist_t *nv = NULL;
1463         nvlist_t *nvinfo = NULL;
1464         nvlist_t *missing = NULL;
1465         char *thename;
1466         char *origname;
1467         int ret;
1468         int error = 0;
1469         char errbuf[1024];
1470
1471         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1472             &origname) == 0);
1473
1474         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1475             "cannot import pool '%s'"), origname);
1476
1477         if (newname != NULL) {
1478                 if (!zpool_name_valid(hdl, B_FALSE, newname))
1479                         return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1480                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1481                             newname));
1482                 thename = (char *)newname;
1483         } else {
1484                 thename = origname;
1485         }
1486
1487         if (props) {
1488                 uint64_t version;
1489                 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1490
1491                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1492                     &version) == 0);
1493
1494                 if ((props = zpool_valid_proplist(hdl, origname,
1495                     props, version, flags, errbuf)) == NULL) {
1496                         return (-1);
1497                 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1498                         nvlist_free(props);
1499                         return (-1);
1500                 }
1501         }
1502
1503         (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1504
1505         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1506             &zc.zc_guid) == 0);
1507
1508         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1509                 nvlist_free(props);
1510                 return (-1);
1511         }
1512         if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1513                 nvlist_free(props);
1514                 return (-1);
1515         }
1516
1517         zc.zc_cookie = flags;
1518         while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1519             errno == ENOMEM) {
1520                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1521                         zcmd_free_nvlists(&zc);
1522                         return (-1);
1523                 }
1524         }
1525         if (ret != 0)
1526                 error = errno;
1527
1528         (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1529         zpool_get_rewind_policy(config, &policy);
1530
1531         if (error) {
1532                 char desc[1024];
1533
1534                 /*
1535                  * The dry run failed, but we print out what success
1536                  * would have looked like if we found a best txg.
1537                  */
1538                 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1539                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1540                             B_TRUE, nv);
1541                         nvlist_free(nv);
1542                         return (-1);
1543                 }
1544
1545                 if (newname == NULL)
1546                         (void) snprintf(desc, sizeof (desc),
1547                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1548                             thename);
1549                 else
1550                         (void) snprintf(desc, sizeof (desc),
1551                             dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1552                             origname, thename);
1553
1554                 switch (error) {
1555                 case ENOTSUP:
1556                         /*
1557                          * Unsupported version.
1558                          */
1559                         (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1560                         break;
1561
1562                 case EINVAL:
1563                         (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1564                         break;
1565
1566                 case EROFS:
1567                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1568                             "one or more devices are read only"));
1569                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
1570                         break;
1571
1572                 case ENXIO:
1573                         if (nv && nvlist_lookup_nvlist(nv,
1574                             ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1575                             nvlist_lookup_nvlist(nvinfo,
1576                             ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1577                                 (void) printf(dgettext(TEXT_DOMAIN,
1578                                     "The devices below are missing, use "
1579                                     "'-m' to import the pool anyway:\n"));
1580                                 print_vdev_tree(hdl, NULL, missing, 2);
1581                                 (void) printf("\n");
1582                         }
1583                         (void) zpool_standard_error(hdl, error, desc);
1584                         break;
1585
1586                 case EEXIST:
1587                         (void) zpool_standard_error(hdl, error, desc);
1588                         break;
1589
1590                 case EBUSY:
1591                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1592                             "one or more devices are already in use"));
1593                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
1594                         break;
1595
1596                 default:
1597                         (void) zpool_standard_error(hdl, error, desc);
1598                         zpool_explain_recover(hdl,
1599                             newname ? origname : thename, -error, nv);
1600                         break;
1601                 }
1602
1603                 nvlist_free(nv);
1604                 ret = -1;
1605         } else {
1606                 zpool_handle_t *zhp;
1607
1608                 /*
1609                  * This should never fail, but play it safe anyway.
1610                  */
1611                 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1612                         ret = -1;
1613                 else if (zhp != NULL)
1614                         zpool_close(zhp);
1615                 if (policy.zrp_request &
1616                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1617                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1618                             ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1619                 }
1620                 nvlist_free(nv);
1621                 /* fall through to free the nvlists and return 'ret' */
1622         }
1623
1624         zcmd_free_nvlists(&zc);
1625         nvlist_free(props);
1626
1627         return (ret);
1628 }
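
/*
 * Example usage (illustrative sketch; this mirrors what zpool_import()
 * builds internally, with 'hdl' and 'config' assumed as above):
 *
 *     nvlist_t *props = NULL;
 *
 *     verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *     verify(nvlist_add_string(props,
 *         zpool_prop_to_name(ZPOOL_PROP_ALTROOT), "/a") == 0);
 *     ret = zpool_import_props(hdl, config, NULL, props,
 *         ZFS_IMPORT_NORMAL);
 *     nvlist_free(props);
 */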
1629
1630 /*
1631  * Scan the pool.
1632  */
1633 int
1634 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1635 {
1636         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
1637         char msg[1024];
1638         libzfs_handle_t *hdl = zhp->zpool_hdl;
1639
1640         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1641         zc.zc_cookie = func;
1642
1643         if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1644             (errno == ENOENT && func != POOL_SCAN_NONE))
1645                 return (0);
1646
1647         if (func == POOL_SCAN_SCRUB) {
1648                 (void) snprintf(msg, sizeof (msg),
1649                     dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1650         } else if (func == POOL_SCAN_NONE) {
1651                 (void) snprintf(msg, sizeof (msg),
1652                     dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1653                     zc.zc_name);
1654         } else {
1655                 assert(!"unexpected result");
1656         }
1657
1658         if (errno == EBUSY) {
1659                 nvlist_t *nvroot;
1660                 pool_scan_stat_t *ps = NULL;
1661                 uint_t psc;
1662
1663                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1664                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1665                 (void) nvlist_lookup_uint64_array(nvroot,
1666                     ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1667                 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1668                         return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1669                 else
1670                         return (zfs_error(hdl, EZFS_RESILVERING, msg));
1671         } else if (errno == ENOENT) {
1672                 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1673         } else {
1674                 return (zpool_standard_error(hdl, errno, msg));
1675         }
1676 }
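
/*
 * Example usage (illustrative sketch; 'zhp' is an open pool handle):
 *
 *     if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *             ...            (EZFS_SCRUBBING, EZFS_RESILVERING, ...)
 *     (void) zpool_scan(zhp, POOL_SCAN_NONE);      (cancel the scrub)
 */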
1677
1678 /*
1679  * Find a vdev that matches the search criteria specified.  We use the
1680  * nvpair name to determine how we should look for the device.
1681  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1682  * spare; it is FALSE if the guid refers to an INUSE spare.
1683  */
1684 static nvlist_t *
1685 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1686     boolean_t *l2cache, boolean_t *log)
1687 {
1688         uint_t c, children;
1689         nvlist_t **child;
1690         nvlist_t *ret;
1691         uint64_t is_log;
1692         char *srchkey;
1693         nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1694
1695         /* Nothing to look for */
1696         if (search == NULL || pair == NULL)
1697                 return (NULL);
1698
1699         /* Obtain the key we will use to search */
1700         srchkey = nvpair_name(pair);
1701
1702         switch (nvpair_type(pair)) {
1703         case DATA_TYPE_UINT64:
1704                 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1705                         uint64_t srchval, theguid;
1706
1707                         verify(nvpair_value_uint64(pair, &srchval) == 0);
1708                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1709                             &theguid) == 0);
1710                         if (theguid == srchval)
1711                                 return (nv);
1712                 }
1713                 break;
1714
1715         case DATA_TYPE_STRING: {
1716                 char *srchval, *val;
1717
1718                 verify(nvpair_value_string(pair, &srchval) == 0);
1719                 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1720                         break;
1721
1722                 /*
1723                  * Search for the requested value. Special cases:
1724                  *
1725                  * - ZPOOL_CONFIG_PATH for whole-disk entries.  These end with a
1726                  *   partition suffix "1", "-part1", or "p1".  The suffix is hidden
1727                  *   from the user, but included in the string, so this matches around
1728                  *   it.
1729                  * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1730                  *
1731                  * Otherwise, all other searches are simple string compares.
1732                  */
1733                 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
1734                         uint64_t wholedisk = 0;
1735
1736                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1737                             &wholedisk);
1738                         if (wholedisk) {
1739                                 char buf[MAXPATHLEN];
1740
1741                                 zfs_append_partition(srchval, buf, sizeof (buf));
1742                                 if (strcmp(val, buf) == 0)
1743                                         return (nv);
1744
1745                                 break;
1746                         }
1747                 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1748                         char *type, *idx, *end, *p;
1749                         uint64_t id, vdev_id;
1750
1751                         /*
1752                          * Determine our vdev type, keeping in mind
1753                          * that the srchval is composed of a type and
1754                          * vdev id pair (i.e. mirror-4).
1755                          */
1756                         if ((type = strdup(srchval)) == NULL)
1757                                 return (NULL);
1758
1759                         if ((p = strrchr(type, '-')) == NULL) {
1760                                 free(type);
1761                                 break;
1762                         }
1763                         idx = p + 1;
1764                         *p = '\0';
1765
1766                         /*
1767                          * If the types don't match then keep looking.
1768                          */
1769                         if (strncmp(val, type, strlen(val)) != 0) {
1770                                 free(type);
1771                                 break;
1772                         }
1773
1774                         verify(strncmp(type, VDEV_TYPE_RAIDZ,
1775                             strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1776                             strncmp(type, VDEV_TYPE_MIRROR,
1777                             strlen(VDEV_TYPE_MIRROR)) == 0);
1778                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1779                             &id) == 0);
1780
1781                         errno = 0;
1782                         vdev_id = strtoull(idx, &end, 10);
1783
1784                         free(type);
1785                         if (errno != 0)
1786                                 return (NULL);
1787
1788                         /*
1789                          * Now verify that we have the correct vdev id.
1790                          */
1791                         if (vdev_id == id)
1792                                 return (nv);
1793                 }
1794
1795                 /*
1796                  * Common case
1797                  */
1798                 if (strcmp(srchval, val) == 0)
1799                         return (nv);
1800                 break;
1801         }
1802
1803         default:
1804                 break;
1805         }
1806
1807         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1808             &child, &children) != 0)
1809                 return (NULL);
1810
1811         for (c = 0; c < children; c++) {
1812                 if ((ret = vdev_to_nvlist_iter(child[c], search,
1813                     avail_spare, l2cache, NULL)) != NULL) {
1814                         /*
1815                          * The 'is_log' value is only set for the toplevel
1816                          * vdev, not the leaf vdevs.  So we always look up the
1817                          * log device from the root of the vdev tree (where
1818                          * 'log' is non-NULL).
1819                          */
1820                         if (log != NULL &&
1821                             nvlist_lookup_uint64(child[c],
1822                             ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1823                             is_log) {
1824                                 *log = B_TRUE;
1825                         }
1826                         return (ret);
1827                 }
1828         }
1829
1830         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1831             &child, &children) == 0) {
1832                 for (c = 0; c < children; c++) {
1833                         if ((ret = vdev_to_nvlist_iter(child[c], search,
1834                             avail_spare, l2cache, NULL)) != NULL) {
1835                                 *avail_spare = B_TRUE;
1836                                 return (ret);
1837                         }
1838                 }
1839         }
1840
1841         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1842             &child, &children) == 0) {
1843                 for (c = 0; c < children; c++) {
1844                         if ((ret = vdev_to_nvlist_iter(child[c], search,
1845                             avail_spare, l2cache, NULL)) != NULL) {
1846                                 *l2cache = B_TRUE;
1847                                 return (ret);
1848                         }
1849                 }
1850         }
1851
1852         return (NULL);
1853 }
1854
1855 /*
1856  * Given a physical path (minus the "/devices" prefix), find the
1857  * associated vdev.
1858  */
1859 nvlist_t *
1860 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1861     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1862 {
1863         nvlist_t *search, *nvroot, *ret;
1864
1865         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1866         verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1867
1868         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1869             &nvroot) == 0);
1870
1871         *avail_spare = B_FALSE;
1872         *l2cache = B_FALSE;
1873         if (log != NULL)
1874                 *log = B_FALSE;
1875         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1876         nvlist_free(search);
1877
1878         return (ret);
1879 }
1880
1881 /*
1882  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
1883  */
1884 boolean_t
1885 zpool_vdev_is_interior(const char *name)
1886 {
1887         if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1888             strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1889                 return (B_TRUE);
1890         return (B_FALSE);
1891 }
1892
1893 nvlist_t *
1894 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1895     boolean_t *l2cache, boolean_t *log)
1896 {
1897         char buf[MAXPATHLEN];
1898         char *end;
1899         nvlist_t *nvroot, *search, *ret;
1900         uint64_t guid;
1901
1902         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1903
1904         guid = strtoull(path, &end, 10);
1905         if (guid != 0 && *end == '\0') {
1906                 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1907         } else if (zpool_vdev_is_interior(path)) {
1908                 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1909         } else if (path[0] != '/') {
1910                 if (zfs_resolve_shortname(path, buf, sizeof (buf)) < 0) {
1911                         nvlist_free(search);
1912                         return (NULL);
1913                 }
1914                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1915         } else {
1916                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1917         }
1918
1919         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1920             &nvroot) == 0);
1921
1922         *avail_spare = B_FALSE;
1923         *l2cache = B_FALSE;
1924         if (log != NULL)
1925                 *log = B_FALSE;
1926         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1927         nvlist_free(search);
1928
1929         return (ret);
1930 }
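
/*
 * Example usage (illustrative sketch; the returned nvlist points into
 * the cached pool config and is not owned by the caller; "sda" is an
 * arbitrary short name, resolved via zfs_resolve_shortname()):
 *
 *     boolean_t spare, l2cache, log;
 *     nvlist_t *tgt;
 *
 *     tgt = zpool_find_vdev(zhp, "sda", &spare, &l2cache, &log);
 *     if (tgt == NULL)
 *             ...            (no such device in this pool)
 */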
1931
1932 static int
1933 vdev_online(nvlist_t *nv)
1934 {
1935         uint64_t ival;
1936
1937         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1938             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1939             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1940                 return (0);
1941
1942         return (1);
1943 }
1944
1945 /*
1946  * Helper function for zpool_get_physpaths().
1947  */
1948 static int
1949 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1950     size_t *bytes_written)
1951 {
1952         size_t bytes_left, pos, rsz;
1953         char *tmppath;
1954         const char *format;
1955
1956         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1957             &tmppath) != 0)
1958                 return (EZFS_NODEVICE);
1959
1960         pos = *bytes_written;
1961         bytes_left = physpath_size - pos;
1962         format = (pos == 0) ? "%s" : " %s";
1963
1964         rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1965         *bytes_written += rsz;
1966
1967         if (rsz >= bytes_left) {
1968                 /* if physpath was not copied properly, clear it */
1969                 if (bytes_left != 0) {
1970                         physpath[pos] = 0;
1971                 }
1972                 return (EZFS_NOSPC);
1973         }
1974         return (0);
1975 }
1976
1977 static int
1978 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1979     size_t *rsz, boolean_t is_spare)
1980 {
1981         char *type;
1982         int ret;
1983
1984         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1985                 return (EZFS_INVALCONFIG);
1986
1987         if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1988                 /*
1989                  * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1990                  * For a spare vdev, we only want to boot from the active
1991                  * spare device.
1992                  */
1993                 if (is_spare) {
1994                         uint64_t spare = 0;
1995                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1996                             &spare);
1997                         if (!spare)
1998                                 return (EZFS_INVALCONFIG);
1999                 }
2000
2001                 if (vdev_online(nv)) {
2002                         if ((ret = vdev_get_one_physpath(nv, physpath,
2003                             phypath_size, rsz)) != 0)
2004                                 return (ret);
2005                 }
2006         } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2007             strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2008             (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2009                 nvlist_t **child;
2010                 uint_t count;
2011                 int i, ret;
2012
2013                 if (nvlist_lookup_nvlist_array(nv,
2014                     ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2015                         return (EZFS_INVALCONFIG);
2016
2017                 for (i = 0; i < count; i++) {
2018                         ret = vdev_get_physpaths(child[i], physpath,
2019                             phypath_size, rsz, is_spare);
2020                         if (ret == EZFS_NOSPC)
2021                                 return (ret);
2022                 }
2023         }
2024
2025         return (EZFS_POOL_INVALARG);
2026 }
2027
2028 /*
2029  * Get phys_path for a root pool config.
2030  * Return 0 on success; non-zero on failure.
2031  */
2032 static int
2033 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2034 {
2035         size_t rsz;
2036         nvlist_t *vdev_root;
2037         nvlist_t **child;
2038         uint_t count;
2039         char *type;
2040
2041         rsz = 0;
2042
2043         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2044             &vdev_root) != 0)
2045                 return (EZFS_INVALCONFIG);
2046
2047         if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2048             nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2049             &child, &count) != 0)
2050                 return (EZFS_INVALCONFIG);
2051
2052         /*
2053          * A root pool cannot have EFI-labeled disks and can only have
2054          * a single top-level vdev.
2055          */
2056         if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2057             pool_uses_efi(vdev_root))
2058                 return (EZFS_POOL_INVALARG);
2059
2060         (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2061             B_FALSE);
2062
2063         /* No online devices */
2064         if (rsz == 0)
2065                 return (EZFS_NODEVICE);
2066
2067         return (0);
2068 }
2069
2070 /*
2071  * Get phys_path for a root pool
2072  * Return 0 on success; non-zero on failure.
2073  */
2074 int
2075 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2076 {
2077         return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2078             phypath_size));
2079 }
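
/*
 * Example usage (illustrative sketch; the buffer size is an arbitrary
 * choice, and on success the buffer holds space-separated phys_path
 * values for the online devices of the root pool):
 *
 *     char physpath[MAXPATHLEN];
 *
 *     if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) != 0)
 *             ...            (not a usable root pool config)
 */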
2080
2081 /*
2082  * If the device has been dynamically expanded then we need to relabel
2083  * the disk to use the new unallocated space.
2084  */
2085 static int
2086 zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
2087 {
2088         int fd, error;
2089
2090         if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
2091                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2092                     "relabel '%s': unable to open device: %d"), path, errno);
2093                 return (zfs_error(hdl, EZFS_OPENFAILED, msg));
2094         }
2095
2096         /*
2097          * It's possible that we might encounter an error if the device
2098          * does not have any unallocated space left. If so, we simply
2099          * ignore that error and continue on.
2100          */
2101         error = efi_use_whole_disk(fd);
2102         (void) close(fd);
2103         if (error && error != VT_ENOSPC) {
2104                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2105                     "relabel '%s': unable to read disk capacity"), path);
2106                 return (zfs_error(hdl, EZFS_NOCAP, msg));
2107         }
2108         return (0);
2109 }
2110
2111 /*
2112  * Bring the specified vdev online.  The 'flags' parameter is a set of the
2113  * ZFS_ONLINE_* flags.
2114  */
2115 int
2116 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2117     vdev_state_t *newstate)
2118 {
2119         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2120         char msg[1024];
2121         nvlist_t *tgt;
2122         boolean_t avail_spare, l2cache, islog;
2123         libzfs_handle_t *hdl = zhp->zpool_hdl;
2124         int error;
2125
2126         if (flags & ZFS_ONLINE_EXPAND) {
2127                 (void) snprintf(msg, sizeof (msg),
2128                     dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2129         } else {
2130                 (void) snprintf(msg, sizeof (msg),
2131                     dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2132         }
2133
2134         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2135         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2136             &islog)) == NULL)
2137                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2138
2139         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2140
2141         if (avail_spare)
2142                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2143
2144         if (flags & ZFS_ONLINE_EXPAND ||
2145             zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2146                 uint64_t wholedisk = 0;
2147
2148                 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2149                     &wholedisk);
2150
2151                 /*
2152                  * XXX - L2ARC 1.0 devices can't support expansion.
2153                  */
2154                 if (l2cache) {
2155                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2156                             "cannot expand cache devices"));
2157                         return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2158                 }
2159
2160                 if (wholedisk) {
2161                         const char *fullpath = path;
2162                         char buf[MAXPATHLEN];
2163
2164                         if (path[0] != '/') {
2165                                 error = zfs_resolve_shortname(path, buf,
2166                                     sizeof (buf));
2167                                 if (error != 0)
2168                                         return (zfs_error(hdl, EZFS_NODEVICE,
2169                                             msg));
2170
2171                                 fullpath = buf;
2172                         }
2173
2174                         error = zpool_relabel_disk(hdl, fullpath, msg);
2175                         if (error != 0)
2176                                 return (error);
2177                 }
2178         }
2179
2180         zc.zc_cookie = VDEV_STATE_ONLINE;
2181         zc.zc_obj = flags;
2182
2183         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2184                 if (errno == EINVAL) {
2185                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2186                             "from this pool into a new one.  Use '%s' "
2187                             "instead"), "zpool detach");
2188                         return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2189                 }
2190                 return (zpool_standard_error(hdl, errno, msg));
2191         }
2192
2193         *newstate = zc.zc_cookie;
2194         return (0);
2195 }
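
/*
 * Example usage (illustrative sketch; onlining a device and requesting
 * expansion into any newly available space, as 'zpool online -e' does):
 *
 *     vdev_state_t newstate;
 *
 *     if (zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND,
 *         &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *             ...            (device is online and expanded)
 */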
2196
2197 /*
2198  * Take the specified vdev offline
2199  */
2200 int
2201 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2202 {
2203         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2204         char msg[1024];
2205         nvlist_t *tgt;
2206         boolean_t avail_spare, l2cache;
2207         libzfs_handle_t *hdl = zhp->zpool_hdl;
2208
2209         (void) snprintf(msg, sizeof (msg),
2210             dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2211
2212         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2213         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2214             NULL)) == NULL)
2215                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2216
2217         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2218
2219         if (avail_spare)
2220                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2221
2222         zc.zc_cookie = VDEV_STATE_OFFLINE;
2223         zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2224
2225         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2226                 return (0);
2227
2228         switch (errno) {
2229         case EBUSY:
2230
2231                 /*
2232                  * There are no other replicas of this device.
2233                  */
2234                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2235
2236         case EEXIST:
2237                 /*
2238                  * The log device has unplayed logs
2239                  */
2240                 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2241
2242         default:
2243                 return (zpool_standard_error(hdl, errno, msg));
2244         }
2245 }
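
/*
 * Example usage (illustrative sketch; a temporary offline persists only
 * until the next import, as with 'zpool offline -t'):
 *
 *     if (zpool_vdev_offline(zhp, "sda", B_TRUE) != 0)
 *             ...            (e.g. EZFS_NOREPLICAS for the last replica)
 */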
2246
2247 /*
2248  * Mark the given vdev faulted.
2249  */
2250 int
2251 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2252 {
2253         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2254         char msg[1024];
2255         libzfs_handle_t *hdl = zhp->zpool_hdl;
2256
2257         (void) snprintf(msg, sizeof (msg),
2258             dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2259
2260         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2261         zc.zc_guid = guid;
2262         zc.zc_cookie = VDEV_STATE_FAULTED;
2263         zc.zc_obj = aux;
2264
2265         if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2266                 return (0);
2267
2268         switch (errno) {
2269         case EBUSY:
2270
2271                 /*
2272                  * There are no other replicas of this device.
2273                  */
2274                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2275
2276         default:
2277                 return (zpool_standard_error(hdl, errno, msg));
2278         }
2279
2280 }
2281
2282 /*
2283  * Mark the given vdev degraded.
2284  */
2285 int
2286 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2287 {
2288         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2289         char msg[1024];
2290         libzfs_handle_t *hdl = zhp->zpool_hdl;
2291
2292         (void) snprintf(msg, sizeof (msg),
2293             dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2294
2295         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2296         zc.zc_guid = guid;
2297         zc.zc_cookie = VDEV_STATE_DEGRADED;
2298         zc.zc_obj = aux;
2299
2300         if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2301                 return (0);
2302
2303         return (zpool_standard_error(hdl, errno, msg));
2304 }
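
/*
 * Example usage (illustrative sketch; this is how an agent such as fmd
 * can fault or degrade a device by GUID, with VDEV_AUX_EXTERNAL noting
 * that the state change was requested from outside ZFS):
 *
 *     uint64_t guid = ...;   (e.g. from ZPOOL_CONFIG_GUID)
 *
 *     (void) zpool_vdev_fault(zhp, guid, VDEV_AUX_EXTERNAL);
 *     (void) zpool_vdev_degrade(zhp, guid, VDEV_AUX_EXTERNAL);
 */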
2305
2306 /*
2307  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2308  * a hot spare.
2309  */
2310 static boolean_t
2311 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2312 {
2313         nvlist_t **child;
2314         uint_t c, children;
2315         char *type;
2316
2317         if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2318             &children) == 0) {
2319                 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2320                     &type) == 0);
2321
2322                 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2323                     children == 2 && child[which] == tgt)
2324                         return (B_TRUE);
2325
2326                 for (c = 0; c < children; c++)
2327                         if (is_replacing_spare(child[c], tgt, which))
2328                                 return (B_TRUE);
2329         }
2330
2331         return (B_FALSE);
2332 }
2333
2334 /*
2335  * Attach new_disk (fully described by nvroot) to old_disk.
2336  * If 'replacing' is specified, the new disk will replace the old one.
2337  */
2338 int
2339 zpool_vdev_attach(zpool_handle_t *zhp,
2340     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2341 {
2342         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2343         char msg[1024];
2344         int ret;
2345         nvlist_t *tgt;
2346         boolean_t avail_spare, l2cache, islog;
2347         uint64_t val;
2348         char *newname;
2349         nvlist_t **child;
2350         uint_t children;
2351         nvlist_t *config_root;
2352         libzfs_handle_t *hdl = zhp->zpool_hdl;
2353         boolean_t rootpool = pool_is_bootable(zhp);
2354
2355         if (replacing)
2356                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2357                     "cannot replace %s with %s"), old_disk, new_disk);
2358         else
2359                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2360                     "cannot attach %s to %s"), new_disk, old_disk);
2361
2362         /*
2363          * If this is a root pool, make sure that we're not attaching an
2364          * EFI labeled device.
2365          */
2366         if (rootpool && pool_uses_efi(nvroot)) {
2367                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2368                     "EFI labeled devices are not supported on root pools."));
2369                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2370         }
2371
2372         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2373         if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2374             &islog)) == 0)
2375                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2376
2377         if (avail_spare)
2378                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2379
2380         if (l2cache)
2381                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2382
2383         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2384         zc.zc_cookie = replacing;
2385
2386         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2387             &child, &children) != 0 || children != 1) {
2388                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2389                     "new device must be a single disk"));
2390                 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2391         }
2392
2393         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2394             ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2395
2396         if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2397                 return (-1);
2398
2399         /*
2400          * If the target is a hot spare that has been swapped in, we can only
2401          * replace it with another hot spare.
2402          */
2403         if (replacing &&
2404             nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2405             (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2406             NULL) == NULL || !avail_spare) &&
2407             is_replacing_spare(config_root, tgt, 1)) {
2408                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2409                     "can only be replaced by another hot spare"));
2410                 free(newname);
2411                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2412         }
2413
2414         free(newname);
2415
2416         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2417                 return (-1);
2418
2419         ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2420
2421         zcmd_free_nvlists(&zc);
2422
2423         if (ret == 0) {
2424                 if (rootpool) {
2425                         /*
2426                          * XXX need a better way to prevent user from
2427                          * booting up a half-baked vdev.
2428                          */
2429                         (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2430                             "sure to wait until resilver is done "
2431                             "before rebooting.\n"));
2432                 }
2433                 return (0);
2434         }
2435
2436         switch (errno) {
2437         case ENOTSUP:
2438                 /*
2439                  * Can't attach to or replace this type of vdev.
2440                  */
2441                 if (replacing) {
2442                         uint64_t version = zpool_get_prop_int(zhp,
2443                             ZPOOL_PROP_VERSION, NULL);
2444
2445                         if (islog)
2446                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2447                                     "cannot replace a log with a spare"));
2448                         else if (version >= SPA_VERSION_MULTI_REPLACE)
2449                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2450                                     "already in replacing/spare config; wait "
2451                                     "for completion or use 'zpool detach'"));
2452                         else
2453                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2454                                     "cannot replace a replacing device"));
2455                 } else {
2456                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2457                             "can only attach to mirrors and top-level "
2458                             "disks"));
2459                 }
2460                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2461                 break;
2462
2463         case EINVAL:
2464                 /*
2465                  * The new device must be a single disk.
2466                  */
2467                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2468                     "new device must be a single disk"));
2469                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2470                 break;
2471
2472         case EBUSY:
2473                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2474                     new_disk);
2475                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2476                 break;
2477
2478         case EOVERFLOW:
2479                 /*
2480                  * The new device is too small.
2481                  */
2482                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2483                     "device is too small"));
2484                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2485                 break;
2486
2487         case EDOM:
2488                 /*
2489                  * The new device has a different alignment requirement.
2490                  */
2491                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2492                     "devices have different sector alignment"));
2493                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2494                 break;
2495
2496         case ENAMETOOLONG:
2497                 /*
2498                  * The resulting top-level vdev spec won't fit in the label.
2499                  */
2500                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2501                 break;
2502
2503         default:
2504                 (void) zpool_standard_error(hdl, errno, msg);
2505         }
2506
2507         return (-1);
2508 }
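
/*
 * Example usage (illustrative sketch; the zpool command builds 'nvroot'
 * with its full vdev-parsing and labeling code, so this hand-rolled
 * single-disk nvlist glosses over labeling and ashift details):
 *
 *     nvlist_t *nvroot, *disk;
 *
 *     verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *     verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *         VDEV_TYPE_DISK) == 0);
 *     verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *         "/dev/sdb") == 0);
 *     verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *     verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *         VDEV_TYPE_ROOT) == 0);
 *     verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *         &disk, 1) == 0);
 *     ret = zpool_vdev_attach(zhp, "/dev/sda", "/dev/sdb", nvroot, 1);
 */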
2509
2510 /*
2511  * Detach the specified device.
2512  */
2513 int
2514 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2515 {
2516         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2517         char msg[1024];
2518         nvlist_t *tgt;
2519         boolean_t avail_spare, l2cache;
2520         libzfs_handle_t *hdl = zhp->zpool_hdl;
2521
2522         (void) snprintf(msg, sizeof (msg),
2523             dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2524
2525         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2526         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2527             NULL)) == 0)
2528                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2529
2530         if (avail_spare)
2531                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2532
2533         if (l2cache)
2534                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2535
2536         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2537
2538         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2539                 return (0);
2540
2541         switch (errno) {
2542
2543         case ENOTSUP:
2544                 /*
2545                  * Can't detach from this type of vdev.
2546                  */
2547                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2548                     "applicable to mirror and replacing vdevs"));
2549                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2550                 break;
2551
2552         case EBUSY:
2553                 /*
2554                  * There are no other replicas of this device.
2555                  */
2556                 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2557                 break;
2558
2559         default:
2560                 (void) zpool_standard_error(hdl, errno, msg);
2561         }
2562
2563         return (-1);
2564 }
2565
2566 /*
2567  * Find a mirror vdev in the source nvlist.
2568  *
2569  * The mchild array contains a list of disks in one of the top-level mirrors
2570  * of the source pool.  The schild array contains a list of disks that the
2571  * user specified on the command line.  We loop over the mchild array to
2572  * see if any entry in the schild array matches.
2573  *
2574  * If a disk in the mchild array is found in the schild array, we return
2575  * the index of that entry.  Otherwise we return -1.
2576  */
2577 static int
2578 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2579     nvlist_t **schild, uint_t schildren)
2580 {
2581         uint_t mc;
2582
2583         for (mc = 0; mc < mchildren; mc++) {
2584                 uint_t sc;
2585                 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2586                     mchild[mc], B_FALSE);
2587
2588                 for (sc = 0; sc < schildren; sc++) {
2589                         char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2590                             schild[sc], B_FALSE);
2591                         boolean_t result = (strcmp(mpath, spath) == 0);
2592
2593                         free(spath);
2594                         if (result) {
2595                                 free(mpath);
2596                                 return (mc);
2597                         }
2598                 }
2599
2600                 free(mpath);
2601         }
2602
2603         return (-1);
2604 }
2605
2606 /*
2607  * Split a mirror pool.  If '*newroot' is NULL, then a new nvlist
2608  * is generated and it is the responsibility of the caller to free it.
2609  */
2610 int
2611 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2612     nvlist_t *props, splitflags_t flags)
2613 {
2614         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2615         char msg[1024];
2616         nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2617         nvlist_t **varray = NULL, *zc_props = NULL;
2618         uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2619         libzfs_handle_t *hdl = zhp->zpool_hdl;
2620         uint64_t vers;
2621         boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2622         int retval = 0;
2623
2624         (void) snprintf(msg, sizeof (msg),
2625             dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2626
2627         if (!zpool_name_valid(hdl, B_FALSE, newname))
2628                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2629
2630         if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2631                 (void) fprintf(stderr, gettext("Internal error: unable to "
2632                     "retrieve pool configuration\n"));
2633                 return (-1);
2634         }
2635
2636         verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2637             == 0);
2638         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2639
2640         if (props) {
2641                 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2642                 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2643                     props, vers, flags, msg)) == NULL)
2644                         return (-1);
2645         }
2646
2647         if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2648             &children) != 0) {
2649                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2650                     "Source pool is missing vdev tree"));
2651                 if (zc_props)
2652                         nvlist_free(zc_props);
2653                 return (-1);
2654         }
2655
2656         varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2657         vcount = 0;
2658
2659         if (*newroot == NULL ||
2660             nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2661             &newchild, &newchildren) != 0)
2662                 newchildren = 0;
2663
2664         for (c = 0; c < children; c++) {
2665                 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2666                 char *type;
2667                 nvlist_t **mchild, *vdev;
2668                 uint_t mchildren;
2669                 int entry;
2670
2671                 /*
2672                  * Unlike cache & spares, slogs are stored in the
2673                  * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2674                  */
2675                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2676                     &is_log);
2677                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2678                     &is_hole);
2679                 if (is_log || is_hole) {
2680                         /*
2681                          * Create a hole vdev and put it in the config.
2682                          */
2683                         if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2684                                 goto out;
2685                         if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2686                             VDEV_TYPE_HOLE) != 0)
2687                                 goto out;
2688                         if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2689                             1) != 0)
2690                                 goto out;
2691                         if (lastlog == 0)
2692                                 lastlog = vcount;
2693                         varray[vcount++] = vdev;
2694                         continue;
2695                 }
2696                 lastlog = 0;
2697                 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2698                     == 0);
2699                 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2700                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2701                             "Source pool must be composed only of mirrors\n"));
2702                         retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2703                         goto out;
2704                 }
2705
2706                 verify(nvlist_lookup_nvlist_array(child[c],
2707                     ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2708
2709                 /* find or add an entry for this top-level vdev */
2710                 if (newchildren > 0 &&
2711                     (entry = find_vdev_entry(zhp, mchild, mchildren,
2712                     newchild, newchildren)) >= 0) {
2713                         /* We found a disk that the user specified. */
2714                         vdev = mchild[entry];
2715                         ++found;
2716                 } else {
2717                         /* User didn't specify a disk for this vdev. */
2718                         vdev = mchild[mchildren - 1];
2719                 }
2720
2721                 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2722                         goto out;
2723         }
2724
2725         /* did we find every disk the user specified? */
2726         if (found != newchildren) {
2727                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2728                     "include at most one disk from each mirror"));
2729                 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2730                 goto out;
2731         }
2732
2733         /* Prepare the nvlist for populating. */
2734         if (*newroot == NULL) {
2735                 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2736                         goto out;
2737                 freelist = B_TRUE;
2738                 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2739                     VDEV_TYPE_ROOT) != 0)
2740                         goto out;
2741         } else {
2742                 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2743         }
2744
2745         /* Add all the children we found */
2746         if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2747             lastlog == 0 ? vcount : lastlog) != 0)
2748                 goto out;
2749
2750         /*
2751          * If we're just doing a dry run, exit now with success.
2752          */
2753         if (flags.dryrun) {
2754                 memory_err = B_FALSE;
2755                 freelist = B_FALSE;
2756                 goto out;
2757         }
2758
2759         /* now build up the config list & call the ioctl */
2760         if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2761                 goto out;
2762
2763         if (nvlist_add_nvlist(newconfig,
2764             ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2765             nvlist_add_string(newconfig,
2766             ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2767             nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2768                 goto out;
2769
2770         /*
2771          * The new pool is automatically part of the namespace unless we
2772          * explicitly export it.
2773          */
2774         if (!flags.import)
2775                 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2776         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2777         (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2778         if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2779                 goto out;
2780         if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2781                 goto out;
2782
2783         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2784                 retval = zpool_standard_error(hdl, errno, msg);
2785                 goto out;
2786         }
2787
2788         freelist = B_FALSE;
2789         memory_err = B_FALSE;
2790
2791 out:
2792         if (varray != NULL) {
2793                 int v;
2794
2795                 for (v = 0; v < vcount; v++)
2796                         nvlist_free(varray[v]);
2797                 free(varray);
2798         }
2799         zcmd_free_nvlists(&zc);
2800         if (zc_props)
2801                 nvlist_free(zc_props);
2802         if (newconfig)
2803                 nvlist_free(newconfig);
2804         if (freelist) {
2805                 nvlist_free(*newroot);
2806                 *newroot = NULL;
2807         }
2808
2809         if (retval != 0)
2810                 return (retval);
2811
2812         if (memory_err)
2813                 return (no_memory(hdl));
2814
2815         return (0);
2816 }
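
/*
 * Example usage (illustrative sketch; a dry-run split lets the caller
 * inspect the generated config before committing to the operation):
 *
 *     splitflags_t flags = { 0 };
 *     nvlist_t *newroot = NULL;
 *
 *     flags.dryrun = 1;
 *     if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *             ...            (inspect newroot)
 *             nvlist_free(newroot);
 *     }
 */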
2817
2818 /*
2819  * Remove the given device.  Currently, this is supported only for hot spares
2820  * and level 2 cache devices.
2821  */
2822 int
2823 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2824 {
2825         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2826         char msg[1024];
2827         nvlist_t *tgt;
2828         boolean_t avail_spare, l2cache, islog;
2829         libzfs_handle_t *hdl = zhp->zpool_hdl;
2830         uint64_t version;
2831
2832         (void) snprintf(msg, sizeof (msg),
2833             dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2834
2835         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2836         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2837             &islog)) == 0)
2838                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2839         /*
2840          * XXX - this should just go away.
2841          */
2842         if (!avail_spare && !l2cache && !islog) {
2843                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2844                     "only inactive hot spares, cache, top-level, "
2845                     "or log devices can be removed"));
2846                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2847         }
2848
2849         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2850         if (islog && version < SPA_VERSION_HOLES) {
2851                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2852                     "pool must be upgraded to support log removal"));
2853                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
2854         }
2855
2856         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2857
2858         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2859                 return (0);
2860
2861         return (zpool_standard_error(hdl, errno, msg));
2862 }
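
/*
 * Example usage (illustrative sketch; the same call covers inactive hot
 * spares, cache devices and, on new enough pools, log devices):
 *
 *     if (zpool_vdev_remove(zhp, "sdc") != 0)
 *             ...            (EZFS_NODEVICE for ordinary data vdevs)
 */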
2863
2864 /*
2865  * Clear the errors for the pool, or the particular device if specified.
2866  */
2867 int
2868 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2869 {
2870         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2871         char msg[1024];
2872         nvlist_t *tgt;
2873         zpool_rewind_policy_t policy;
2874         boolean_t avail_spare, l2cache;
2875         libzfs_handle_t *hdl = zhp->zpool_hdl;
2876         nvlist_t *nvi = NULL;
2877         int error;
2878
2879         if (path)
2880                 (void) snprintf(msg, sizeof (msg),
2881                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2882                     path);
2883         else
2884                 (void) snprintf(msg, sizeof (msg),
2885                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2886                     zhp->zpool_name);
2887
2888         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2889         if (path) {
2890                 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2891                     &l2cache, NULL)) == NULL)
2892                         return (zfs_error(hdl, EZFS_NODEVICE, msg));
2893
2894                 /*
2895                  * Don't allow error clearing for hot spares.  Do allow
2896                  * error clearing for l2cache devices.
2897                  */
2898                 if (avail_spare)
2899                         return (zfs_error(hdl, EZFS_ISSPARE, msg));
2900
2901                 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2902                     &zc.zc_guid) == 0);
2903         }
2904
2905         zpool_get_rewind_policy(rewindnvl, &policy);
2906         zc.zc_cookie = policy.zrp_request;
2907
2908         if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
2909                 return (-1);
2910
2911         if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
2912                 return (-1);
2913
2914         while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
2915             errno == ENOMEM) {
2916                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2917                         zcmd_free_nvlists(&zc);
2918                         return (-1);
2919                 }
2920         }
2921
2922         if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2923             errno != EPERM && errno != EACCES)) {
2924                 if (policy.zrp_request &
2925                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2926                         (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2927                         zpool_rewind_exclaim(hdl, zc.zc_name,
2928                             ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2929                             nvi);
2930                         nvlist_free(nvi);
2931                 }
2932                 zcmd_free_nvlists(&zc);
2933                 return (0);
2934         }
2935
2936         zcmd_free_nvlists(&zc);
2937         return (zpool_standard_error(hdl, errno, msg));
2938 }
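
/*
 * Usage sketch (illustrative only): clear pool-wide errors with no
 * rewind, assuming 'g_zfs' from libzfs_init().  A NULL device path
 * clears the whole pool; the rewind policy travels in an nvlist:
 *
 *	nvlist_t *policy;
 *	zpool_handle_t *zhp = zpool_open(g_zfs, "tank");
 *
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
 *	    ZPOOL_NO_REWIND) == 0);
 *	if (zpool_clear(zhp, NULL, policy) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 *	nvlist_free(policy);
 *	zpool_close(zhp);
 */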
2939
2940 /*
2941  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2942  */
2943 int
2944 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2945 {
2946         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2947         char msg[1024];
2948         libzfs_handle_t *hdl = zhp->zpool_hdl;
2949
2950         (void) snprintf(msg, sizeof (msg),
2951             dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2952             (u_longlong_t)guid);
2953
2954         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2955         zc.zc_guid = guid;
2956         zc.zc_cookie = ZPOOL_NO_REWIND;
2957
2958         if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2959                 return (0);
2960
2961         return (zpool_standard_error(hdl, errno, msg));
2962 }
2963
2964 /*
2965  * Change the GUID for a pool.
2966  */
2967 int
2968 zpool_reguid(zpool_handle_t *zhp)
2969 {
2970         char msg[1024];
2971         libzfs_handle_t *hdl = zhp->zpool_hdl;
2972         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2973
2974         (void) snprintf(msg, sizeof (msg),
2975             dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
2976
2977         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2978         if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
2979                 return (0);
2980
2981         return (zpool_standard_error(hdl, errno, msg));
2982 }
2983
2984 /*
2985  * Convert from a devid string to a path.
2986  */
2987 static char *
2988 devid_to_path(char *devid_str)
2989 {
2990         ddi_devid_t devid;
2991         char *minor;
2992         char *path;
2993         devid_nmlist_t *list = NULL;
2994         int ret;
2995
2996         if (devid_str_decode(devid_str, &devid, &minor) != 0)
2997                 return (NULL);
2998
2999         ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3000
3001         devid_str_free(minor);
3002         devid_free(devid);
3003
3004         if (ret != 0)
3005                 return (NULL);
3006
3007         /* may be NULL on allocation failure; free the list either way */
3008         path = strdup(list[0].devname);
3009
3010         devid_free_nmlist(list);
3011
3012         return (path);
3013 }
3014
3015 /*
3016  * Convert from a path to a devid string.
3017  */
3018 static char *
3019 path_to_devid(const char *path)
3020 {
3021         int fd;
3022         ddi_devid_t devid;
3023         char *minor, *ret;
3024
3025         if ((fd = open(path, O_RDONLY)) < 0)
3026                 return (NULL);
3027
3028         minor = NULL;
3029         ret = NULL;
3030         if (devid_get(fd, &devid) == 0) {
3031                 if (devid_get_minor_name(fd, &minor) == 0)
3032                         ret = devid_str_encode(devid, minor);
3033                 if (minor != NULL)
3034                         devid_str_free(minor);
3035                 devid_free(devid);
3036         }
3037         (void) close(fd);
3038
3039         return (ret);
3040 }
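
/*
 * Sketch of how the two helpers above round-trip (illustrative only;
 * assumes "/dev/sda" is a device with a valid devid):
 *
 *	char *devid = path_to_devid("/dev/sda");
 *	if (devid != NULL) {
 *		char *path = devid_to_path(devid);
 *		if (path != NULL) {
 *			(void) printf("%s\n", path);
 *			free(path);
 *		}
 *		devid_str_free(devid);
 *	}
 */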
3041
3042 /*
3043  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
3044  * ignore any failure here, since a common case is for an unprivileged user to
3045  * type 'zpool status', and we'll display the correct information anyway.
3046  */
3047 static void
3048 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3049 {
3050         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3051
3052         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3053         (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
3054         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3055             &zc.zc_guid) == 0);
3056
3057         (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3058 }
3059
3060 /*
3061  * Remove partition suffix from a vdev path.  Partition suffixes may take three
3062  * forms: "-partX", "pX", or "X", where X is a string of digits.  The second
3063  * case only occurs when the suffix is preceded by a digit, e.g. "md0p0".  The
3064  * third case only occurs when preceded by a string matching the regular
3065  * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
3066  */
3067 static char *
3068 strip_partition(libzfs_handle_t *hdl, char *path)
3069 {
3070         char *tmp = zfs_strdup(hdl, path);
3071         char *part = NULL, *d = NULL;
3072
3073         if ((part = strstr(tmp, "-part")) && part != tmp) {
3074                 d = part + 5;
3075         } else if ((part = strrchr(tmp, 'p')) &&
3076             part > tmp + 1 && isdigit(*(part-1))) {
3077                 d = part + 1;
3078         } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3079                 for (d = &tmp[2]; isalpha(*d); part = ++d);
3080         }
3081         if (part && d && *d != '\0') {
3082                 for (; isdigit(*d); d++);
3083                 if (*d == '\0')
3084                         *part = '\0';
3085         }
3086         return (tmp);
3087 }
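
/*
 * Examples of the three suffix forms handled above (the caller frees
 * the returned copy):
 *
 *	strip_partition(hdl, "sda1")		returns "sda"
 *	strip_partition(hdl, "md0p1")		returns "md0"
 *	strip_partition(hdl, "dm-0-part1")	returns "dm-0"
 */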
3088
3089 #define PATH_BUF_LEN    64
3090
3091 /*
3092  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
3093  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3094  * We also check if this is a whole disk, in which case we strip off the
3095  * trailing 's0' slice name.
3096  *
3097  * This routine is also responsible for identifying when disks have been
3098  * reconfigured in a new location.  The kernel will have opened the device by
3099  * devid, but the path will still refer to the old location.  To catch this, we
3100  * first do a path -> devid translation (which is fast for the common case).  If
3101  * the devid matches, we're done.  If not, we do a reverse devid -> path
3102  * translation and issue the appropriate ioctl() to update the path of the vdev.
3103  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3104  * of these checks.
3105  */
3106 char *
3107 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3108     boolean_t verbose)
3109 {
3110         char *path, *devid, *type;
3111         uint64_t value;
3112         char buf[PATH_BUF_LEN];
3113         vdev_stat_t *vs;
3114         uint_t vsc;
3115
3116         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3117             &value) == 0) {
3118                 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3119                     &value) == 0);
3120                 (void) snprintf(buf, sizeof (buf), "%llu",
3121                     (u_longlong_t)value);
3122                 path = buf;
3123         } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3124                 /*
3125                  * If the device is dead (faulted, offline, etc) then don't
3126                  * bother opening it.  Otherwise we may be forcing the user to
3127                  * open a misbehaving device, which can have undesirable
3128                  * effects.
3129                  */
3130                 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3131                     (uint64_t **)&vs, &vsc) != 0 ||
3132                     vs->vs_state >= VDEV_STATE_DEGRADED) &&
3133                     zhp != NULL &&
3134                     nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3135                         /*
3136                          * Determine if the current path is correct.
3137                          */
3138                         char *newdevid = path_to_devid(path);
3139
3140                         if (newdevid == NULL ||
3141                             strcmp(devid, newdevid) != 0) {
3142                                 char *newpath;
3143
3144                                 if ((newpath = devid_to_path(devid)) != NULL) {
3145                                         /*
3146                                          * Update the path appropriately.
3147                                          */
3148                                         set_path(zhp, nv, newpath);
3149                                         if (nvlist_add_string(nv,
3150                                             ZPOOL_CONFIG_PATH, newpath) == 0)
3151                                                 verify(nvlist_lookup_string(nv,
3152                                                     ZPOOL_CONFIG_PATH,
3153                                                     &path) == 0);
3154                                         free(newpath);
3155                                 }
3156                         }
3157
3158                         if (newdevid)
3159                                 devid_str_free(newdevid);
3160                 }
3161
3162                 /*
3163                  * For a block device only use the name.
3164                  */
3165                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3166                 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3167                         path = strrchr(path, '/');
3168                         path++;
3169                 }
3170
3171                 /*
3172                  * Remove the partition from the path if this is a whole disk.
3173                  */
3174                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3175                     &value) == 0 && value) {
3176                         return (strip_partition(hdl, path));
3177                 }
3178         } else {
3179                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3180
3181                 /*
3182                  * If it's a raidz device, we need to stick in the parity level.
3183                  */
3184                 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3185                         char tmpbuf[PATH_BUF_LEN];
3186
3187                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3188                             &value) == 0);
3189                         (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%llu", path,
3190                             (u_longlong_t)value);
3191                         path = tmpbuf;
3192                 }
3193
3194                 /*
3195                  * We identify each top-level vdev by using a <type-id>
3196                  * naming convention.
3197                  */
3198                 if (verbose) {
3199                         uint64_t id;
3200
3201                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3202                             &id) == 0);
3203                         (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3204                             (u_longlong_t)id);
3205                         path = buf;
3206                 }
3207         }
3208
3209         return (zfs_strdup(hdl, path));
3210 }
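
/*
 * Usage sketch (illustrative only): print the display name of every
 * top-level vdev, assuming 'g_zfs' and an open pool handle 'zhp':
 *
 *	nvlist_t *config, *nvroot, **child;
 *	uint_t c, children;
 *	char *name;
 *
 *	config = zpool_get_config(zhp, NULL);
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *	for (c = 0; c < children; c++) {
 *		name = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE);
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */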
3211
3212 static int
3213 zbookmark_compare(const void *a, const void *b)
3214 {
3215         return (memcmp(a, b, sizeof (zbookmark_t)));
3216 }
3217
3218 /*
3219  * Retrieve the persistent error log, uniquify the members, and return to the
3220  * caller.
3221  */
3222 int
3223 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3224 {
3225         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3226         uint64_t count;
3227         zbookmark_t *zb = NULL;
3228         int i;
3229
3230         /*
3231          * Retrieve the raw error list from the kernel.  If the number of errors
3232          * has increased, allocate more space and continue until we get the
3233          * entire list.
3234          */
3235         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3236             &count) == 0);
3237         if (count == 0)
3238                 return (0);
3239         if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3240             count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3241                 return (-1);
3242         zc.zc_nvlist_dst_size = count;
3243         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3244         for (;;) {
3245                 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3246                     &zc) != 0) {
3247                         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3248                         if (errno == ENOMEM) {
3249                                 count = zc.zc_nvlist_dst_size;
3250                                 if ((zc.zc_nvlist_dst = (uintptr_t)
3251                                     zfs_alloc(zhp->zpool_hdl, count *
3252                                     sizeof (zbookmark_t))) == (uintptr_t)NULL)
3253                                         return (-1);
3254                         } else {
3255                                 return (-1);
3256                         }
3257                 } else {
3258                         break;
3259                 }
3260         }
3261
3262         /*
3263          * Sort the resulting bookmarks.  This is a little confusing due to the
3264          * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
3265  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3266  * _not_ copied as part of the process.  So we point the start of our
3267  * array appropriately and decrement the total number of elements.
3268          */
3269         zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3270             zc.zc_nvlist_dst_size;
3271         count -= zc.zc_nvlist_dst_size;
3272
3273         qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3274
3275         verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3276
3277         /*
3278          * Fill in the nverrlistp with nvlists of dataset and object numbers.
3279          */
3280         for (i = 0; i < count; i++) {
3281                 nvlist_t *nv;
3282
3283                 /* ignoring zb_blkid and zb_level for now */
3284                 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3285                     zb[i-1].zb_object == zb[i].zb_object)
3286                         continue;
3287
3288                 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3289                         goto nomem;
3290                 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3291                     zb[i].zb_objset) != 0) {
3292                         nvlist_free(nv);
3293                         goto nomem;
3294                 }
3295                 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3296                     zb[i].zb_object) != 0) {
3297                         nvlist_free(nv);
3298                         goto nomem;
3299                 }
3300                 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3301                         nvlist_free(nv);
3302                         goto nomem;
3303                 }
3304                 nvlist_free(nv);
3305         }
3306
3307         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3308         return (0);
3309
3310 nomem:
3311         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3312         return (no_memory(zhp->zpool_hdl));
3313 }
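
/*
 * Usage sketch (illustrative only): walk the uniquified error list and
 * print each <dataset>:<object> pair, given an open pool handle 'zhp':
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			(void) printf("%llu:%llu\n", (u_longlong_t)dsobj,
 *			    (u_longlong_t)obj);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */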
3314
3315 /*
3316  * Upgrade a ZFS pool to the latest on-disk version.
3317  */
3318 int
3319 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3320 {
3321         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3322         libzfs_handle_t *hdl = zhp->zpool_hdl;
3323
3324         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3325         zc.zc_cookie = new_version;
3326
3327         if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3328                 return (zpool_standard_error_fmt(hdl, errno,
3329                     dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3330                     zhp->zpool_name));
3331         return (0);
3332 }
3333
3334 void
3335 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3336     char *history_str)
3337 {
3338         int i;
3339
3340         (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3341         for (i = 1; i < argc; i++) {
3342                 if (strlen(history_str) + 1 + strlen(argv[i]) >
3343                     HIS_MAX_RECORD_LEN)
3344                         break;
3345                 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3346                 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3347         }
3348 }
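
/*
 * Example (illustrative): called as zpool_set_history_str("zpool", argc,
 * argv, buf) with argv = { "zpool", "create", "tank", "sda" }, 'buf'
 * becomes "zpool create tank sda".  argv[0] is skipped since the
 * subcommand string already covers it, and any argument that would push
 * the record past HIS_MAX_RECORD_LEN is dropped along with the rest.
 */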
3349
3350 /*
3351  * Stage command history for logging.
3352  */
3353 int
3354 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3355 {
3356         if (history_str == NULL)
3357                 return (EINVAL);
3358
3359         if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3360                 return (EINVAL);
3361
3362         if (hdl->libzfs_log_str != NULL)
3363                 free(hdl->libzfs_log_str);
3364
3365         if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3366                 return (no_memory(hdl));
3367
3368         return (0);
3369 }
3370
3371 /*
3372  * Perform ioctl to get some command history of a pool.
3373  *
3374  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3375  * logical offset of the history buffer to start reading from.
3376  *
3377  * Upon return, 'off' is the next logical offset to read from and
3378  * 'len' is the actual amount of bytes read into 'buf'.
3379  */
3380 static int
3381 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3382 {
3383         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3384         libzfs_handle_t *hdl = zhp->zpool_hdl;
3385
3386         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3387
3388         zc.zc_history = (uint64_t)(uintptr_t)buf;
3389         zc.zc_history_len = *len;
3390         zc.zc_history_offset = *off;
3391
3392         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3393                 switch (errno) {
3394                 case EPERM:
3395                         return (zfs_error_fmt(hdl, EZFS_PERM,
3396                             dgettext(TEXT_DOMAIN,
3397                             "cannot show history for pool '%s'"),
3398                             zhp->zpool_name));
3399                 case ENOENT:
3400                         return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3401                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
3402                             "'%s'"), zhp->zpool_name));
3403                 case ENOTSUP:
3404                         return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3405                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
3406                             "'%s', pool must be upgraded"), zhp->zpool_name));
3407                 default:
3408                         return (zpool_standard_error_fmt(hdl, errno,
3409                             dgettext(TEXT_DOMAIN,
3410                             "cannot get history for '%s'"), zhp->zpool_name));
3411                 }
3412         }
3413
3414         *len = zc.zc_history_len;
3415         *off = zc.zc_history_offset;
3416
3417         return (0);
3418 }
3419
3420 /*
3421  * Process the buffer of nvlists, unpacking and storing each nvlist record
3422  * into 'records'.  'leftover' is set to the number of bytes that weren't
3423  * processed as there wasn't a complete record.
3424  */
3425 int
3426 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3427     nvlist_t ***records, uint_t *numrecords)
3428 {
3429         uint64_t reclen;
3430         nvlist_t *nv;
3431         int i;
3432
3433         while (bytes_read > sizeof (reclen)) {
3434
3435                 /* get length of packed record (stored as little endian) */
3436                 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3437                         reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3438
3439                 if (bytes_read < sizeof (reclen) + reclen)
3440                         break;
3441
3442                 /* unpack record */
3443                 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3444                         return (ENOMEM);
3445                 bytes_read -= sizeof (reclen) + reclen;
3446                 buf += sizeof (reclen) + reclen;
3447
3448                 /* add record to nvlist array */
3449                 (*numrecords)++;
3450                 if (ISP2(*numrecords + 1)) {
3451                         *records = realloc(*records,
3452                             *numrecords * 2 * sizeof (nvlist_t *));
3453                 }
3454                 (*records)[*numrecords - 1] = nv;
3455         }
3456
3457         *leftover = bytes_read;
3458         return (0);
3459 }
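
/*
 * Buffer layout consumed above, for reference: each record is a 64-bit
 * little-endian length followed by that many bytes of packed nvlist
 * data, repeated until the buffer is exhausted:
 *
 *	+----------+----------------+----------+----------------+
 *	|  reclen  | packed nvlist  |  reclen  | packed nvlist  | ...
 *	| 8 bytes  | 'reclen' bytes | 8 bytes  | 'reclen' bytes |
 *	+----------+----------------+----------+----------------+
 */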
3460
3461 #define HIS_BUF_LEN     (128*1024)
3462
3463 /*
3464  * Retrieve the command history of a pool.
3465  */
3466 int
3467 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3468 {
3469         char buf[HIS_BUF_LEN];
3470         uint64_t off = 0;
3471         nvlist_t **records = NULL;
3472         uint_t numrecords = 0;
3473         int err, i;
3474
3475         do {
3476                 uint64_t bytes_read = sizeof (buf);
3477                 uint64_t leftover;
3478
3479                 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3480                         break;
3481
3482                 /* if nothing else was read in, we're at EOF, just return */
3483                 if (!bytes_read)
3484                         break;
3485
3486                 if ((err = zpool_history_unpack(buf, bytes_read,
3487                     &leftover, &records, &numrecords)) != 0)
3488                         break;
3489                 off -= leftover;
3490
3491                 /* CONSTCOND */
3492         } while (1);
3493
3494         if (!err) {
3495                 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3496                 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3497                     records, numrecords) == 0);
3498         }
3499         for (i = 0; i < numrecords; i++)
3500                 nvlist_free(records[i]);
3501         free(records);
3502
3503         return (err);
3504 }
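
/*
 * Usage sketch (illustrative only): print every logged command, given
 * an open pool handle 'zhp'.  Internal events carry no ZPOOL_HIST_CMD
 * string, so records without it are skipped:
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */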
3505
3506 /*
3507  * Retrieve the next event.  If there is a new event available 'nvp' will
3508  * contain a newly allocated nvlist and 'dropped' will be set to the number
3509  * of missed events since the last call to this function.  When 'nvp' is
3510  * set to NULL it indicates no new events are available.  In either case
3511  * the function returns 0 and it is up to the caller to free 'nvp'.  In
3512  * the case of a fatal error the function will return a non-zero value.
3513  * When the function is called in blocking mode it will not return until
3514  * a new event is available.
3515  */
3516 int
3517 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3518     int *dropped, int block, int cleanup_fd)
3519 {
3520         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3521         int error = 0;
3522
3523         *nvp = NULL;
3524         *dropped = 0;
3525         zc.zc_cleanup_fd = cleanup_fd;
3526
3527         if (!block)
3528                 zc.zc_guid = ZEVENT_NONBLOCK;
3529
3530         if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3531                 return (-1);
3532
3533 retry:
3534         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3535                 switch (errno) {
3536                 case ESHUTDOWN:
3537                         error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3538                             dgettext(TEXT_DOMAIN, "zfs shutdown"));
3539                         goto out;
3540                 case ENOENT:
3541                         /* Blocking error case should not occur */
3542                         if (block)
3543                                 error = zpool_standard_error_fmt(hdl, errno,
3544                                     dgettext(TEXT_DOMAIN, "cannot get event"));
3545
3546                         goto out;
3547                 case ENOMEM:
3548                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3549                                 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3550                                     dgettext(TEXT_DOMAIN, "cannot get event"));
3551                                 goto out;
3552                         } else {
3553                                 goto retry;
3554                         }
3555                 default:
3556                         error = zpool_standard_error_fmt(hdl, errno,
3557                             dgettext(TEXT_DOMAIN, "cannot get event"));
3558                         goto out;
3559                 }
3560         }
3561
3562         error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3563         if (error != 0)
3564                 goto out;
3565
3566         *dropped = (int)zc.zc_cookie;
3567 out:
3568         zcmd_free_nvlists(&zc);
3569
3570         return (error);
3571 }
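
/*
 * Usage sketch (illustrative only): drain all pending events without
 * blocking, assuming 'g_zfs' and a cleanup descriptor opened on the
 * ZFS control device:
 *
 *	nvlist_t *nvl;
 *	int dropped;
 *	int cleanup_fd = open(ZFS_DEV, O_RDWR);
 *
 *	while (zpool_events_next(g_zfs, &nvl, &dropped, 0,
 *	    cleanup_fd) == 0) {
 *		if (nvl == NULL)
 *			break;
 *		if (dropped > 0)
 *			(void) printf("%d events dropped\n", dropped);
 *		nvlist_free(nvl);
 *	}
 *	(void) close(cleanup_fd);
 */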
3572
3573 /*
3574  * Clear all events.
3575  */
3576 int
3577 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3578 {
3579         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3580         char msg[1024];
3581
3582         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3583             "cannot clear events"));
3584
3585         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3586                 return (zpool_standard_error_fmt(hdl, errno, msg));
3587
3588         if (count != NULL)
3589                 *count = (int)zc.zc_cookie; /* # of events cleared */
3590
3591         return (0);
3592 }
3593
3594 void
3595 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3596     char *pathname, size_t len)
3597 {
3598         zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3599         boolean_t mounted = B_FALSE;
3600         char *mntpnt = NULL;
3601         char dsname[MAXNAMELEN];
3602
3603         if (dsobj == 0) {
3604                 /* special case for the MOS */
3605                 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", (longlong_t)obj);
3606                 return;
3607         }
3608
3609         /* get the dataset's name */
3610         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3611         zc.zc_obj = dsobj;
3612         if (ioctl(zhp->zpool_hdl->libzfs_fd,
3613             ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3614                 /* just write out a path of two object numbers */
3615                 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3616                     (longlong_t)dsobj, (longlong_t)obj);
3617                 return;
3618         }
3619         (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3620
3621         /* find out if the dataset is mounted */
3622         mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3623
3624         /* get the corrupted object's path */
3625         (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3626         zc.zc_obj = obj;
3627         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3628             &zc) == 0) {
3629                 if (mounted) {
3630                         (void) snprintf(pathname, len, "%s%s", mntpnt,
3631                             zc.zc_value);
3632                 } else {
3633                         (void) snprintf(pathname, len, "%s:%s",
3634                             dsname, zc.zc_value);
3635                 }
3636         } else {
3637                 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (longlong_t)obj);
3638         }
3639         free(mntpnt);
3640 }
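
/*
 * Output forms produced above (illustrative): for dataset "tank/fs"
 * mounted at "/tank/fs" and an object whose path is "/a/file", the
 * result is "/tank/fs/a/file"; unmounted, "tank/fs:/a/file"; if the
 * object's path cannot be resolved, "tank/fs:<0x1234>"; and for MOS
 * (dsobj == 0) damage, "<metadata>:<0x1234>".
 */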
3641
3642 /*
3643  * Read the EFI label from the config; if a label does not exist, then
3644  * pass back the error to the caller. If the caller has passed a non-NULL
3645  * diskaddr argument then we set it to the starting address of the EFI
3646  * partition.
3647  */
3648 static int
3649 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3650 {
3651         char *path;
3652         int fd;
3653         char diskname[MAXPATHLEN];
3654         int err = -1;
3655
3656         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3657                 return (err);
3658
3659         (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3660             strrchr(path, '/'));
3661         if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
3662                 struct dk_gpt *vtoc;
3663
3664                 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3665                         if (sb != NULL)
3666                                 *sb = vtoc->efi_parts[0].p_start;
3667                         efi_free(vtoc);
3668                 }
3669                 (void) close(fd);
3670         }
3671         return (err);
3672 }
3673
3674 /*
3675  * Determine where a partition starts on a disk in the current
3676  * configuration.
3677  */
3678 static diskaddr_t
3679 find_start_block(nvlist_t *config)
3680 {
3681         nvlist_t **child;
3682         uint_t c, children;
3683         diskaddr_t sb = MAXOFFSET_T;
3684         uint64_t wholedisk;
3685
3686         if (nvlist_lookup_nvlist_array(config,
3687             ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3688                 if (nvlist_lookup_uint64(config,
3689                     ZPOOL_CONFIG_WHOLE_DISK,
3690                     &wholedisk) != 0 || !wholedisk) {
3691                         return (MAXOFFSET_T);
3692                 }
3693                 if (read_efi_label(config, &sb) < 0)
3694                         sb = MAXOFFSET_T;
3695                 return (sb);
3696         }
3697
3698         for (c = 0; c < children; c++) {
3699                 sb = find_start_block(child[c]);
3700                 if (sb != MAXOFFSET_T) {
3701                         return (sb);
3702                 }
3703         }
3704         return (MAXOFFSET_T);
3705 }
3706
3707 int
3708 zpool_label_disk_wait(char *path, int timeout)
3709 {
3710         struct stat64 statbuf;
3711         int i;
3712
3713         /*
3714          * Wait 'timeout' milliseconds for a newly created device to be available
3715          * from the given path.  There is a small window when a /dev/ device
3716          * will exist and the udev link will not, so we must wait for the
3717          * symlink.  Depending on the udev rules this may take a few seconds.
3718          */
3719         for (i = 0; i < timeout; i++) {
3720                 usleep(1000);
3721
3722                 errno = 0;
3723                 if ((stat64(path, &statbuf) == 0) && (errno == 0))
3724                         return (0);
3725         }
3726
3727         return (ENOENT);
3728 }
3729
3730 int
3731 zpool_label_disk_check(char *path)
3732 {
3733         struct dk_gpt *vtoc;
3734         int fd, err;
3735
3736         if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
3737                 return (errno);
3738
3739         if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
3740                 (void) close(fd);
3741                 return (err);
3742         }
3743
3744         if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
3745                 efi_free(vtoc);
3746                 (void) close(fd);
3747                 return (EIDRM);
3748         }
3749
3750         efi_free(vtoc);
3751         (void) close(fd);
3752         return (0);
3753 }
3754
3755 /*
3756  * Label an individual disk.  The name provided is the short name,
3757  * stripped of any leading /dev path.
3758  */
3759 int
3760 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3761 {
3762         char path[MAXPATHLEN];
3763         struct dk_gpt *vtoc;
3764         int rval, fd;
3765         size_t resv = EFI_MIN_RESV_SIZE;
3766         uint64_t slice_size;
3767         diskaddr_t start_block;
3768         char errbuf[1024];
3769
3770         /* prepare an error message just in case */
3771         (void) snprintf(errbuf, sizeof (errbuf),
3772             dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3773
3774         if (zhp) {
3775                 nvlist_t *nvroot;
3776
3777                 if (pool_is_bootable(zhp)) {
3778                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3779                             "EFI labeled devices are not supported on root "
3780                             "pools."));
3781                         return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3782                 }
3783
3784                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3785                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3786
3787                 if (zhp->zpool_start_block == 0)
3788                         start_block = find_start_block(nvroot);
3789                 else
3790                         start_block = zhp->zpool_start_block;
3791                 zhp->zpool_start_block = start_block;
3792         } else {
3793                 /* new pool */
3794                 start_block = NEW_START_BLOCK;
3795         }
3796
3797         (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3798             BACKUP_SLICE);
3799
3800         if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
3801                 /*
3802                  * This shouldn't happen.  We've long since verified that this
3803                  * is a valid device.
3804                  */
3805                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3806                     "label '%s': unable to open device: %d"), path, errno);
3807                 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3808         }
3809
3810         if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3811                 /*
3812                  * The only way this can fail is if we run out of memory, or we
3813                  * were unable to read the disk's capacity
3814                  */
3815                 if (errno == ENOMEM)
3816                         (void) no_memory(hdl);
3817
3818                 (void) close(fd);
3819                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3820                     "label '%s': unable to read disk capacity"), path);
3821
3822                 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3823         }
3824
3825         slice_size = vtoc->efi_last_u_lba + 1;
3826         slice_size -= EFI_MIN_RESV_SIZE;
3827         if (start_block == MAXOFFSET_T)
3828                 start_block = NEW_START_BLOCK;
3829         slice_size -= start_block;
3830         slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
3831
3832         vtoc->efi_parts[0].p_start = start_block;
3833         vtoc->efi_parts[0].p_size = slice_size;
3834
3835         /*
3836          * Why we use V_USR: V_BACKUP confuses users, and is considered
3837          * disposable by some EFI utilities (since EFI doesn't have a backup
3838          * slice).  V_UNASSIGNED is supposed to be used only for zero size
3839          * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3840          * etc. were all pretty specific.  V_USR is as close to reality as we
3841          * can get, in the absence of V_OTHER.
3842          */
3843         vtoc->efi_parts[0].p_tag = V_USR;
3844         (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3845
3846         vtoc->efi_parts[8].p_start = slice_size + start_block;
3847         vtoc->efi_parts[8].p_size = resv;
3848         vtoc->efi_parts[8].p_tag = V_RESERVED;
3849
3850         if ((rval = efi_write(fd, vtoc)) != 0) {
3851                 /*
3852                  * Some block drivers (like pcata) may not support EFI
3853                  * GPT labels.  Print out a helpful error message dir-
3854                  * GPT labels.  Print out a helpful error message
3855                  * directing the user to manually label the disk and give
3856                  */
3857                 (void) close(fd);
3858                 efi_free(vtoc);
3859
3860                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
3861                     "parted(8) and then provide a specific slice: %d"), rval);
3862                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3863         }
3864
3865         (void) close(fd);
3866         efi_free(vtoc);
3867
3868         /* Wait for the first expected slice to appear. */
3869         (void) snprintf(path, sizeof (path), "%s/%s%s%s", DISK_ROOT, name,
3870             isdigit(name[strlen(name)-1]) ? "p" : "", FIRST_SLICE);
3871         rval = zpool_label_disk_wait(path, 3000);
3872         if (rval) {
3873                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
3874                     "detect device partitions on '%s': %d"), path, rval);
3875                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3876         }
3877
3878         /* We can't be too paranoid.  Read the label back and verify it. */
3879         (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
3880         rval = zpool_label_disk_check(path);
3881         if (rval) {
3882                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
3883                     "EFI label on '%s' is damaged.  Ensure\nthis device "
3884                     "is not in use, and is functioning properly: %d"),
3885                     path, rval);
3886                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3887         }
3888
3889         return (0);
3890 }
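
/*
 * Usage sketch (illustrative only): label a bare disk before building
 * a new pool on it, assuming 'g_zfs' from libzfs_init().  A NULL pool
 * handle selects the new-pool start block, and the name carries no
 * /dev prefix:
 *
 *	if (zpool_label_disk(g_zfs, NULL, "sdb") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */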