Update core ZFS code from build 121 to build 141.
zfs.git: lib/libzfs/libzfs_pool.c
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25
26 #include <ctype.h>
27 #include <errno.h>
28 #include <devid.h>
29 #include <fcntl.h>
30 #include <libintl.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <strings.h>
34 #include <unistd.h>
35 #include <sys/efi_partition.h>
36 #include <sys/vtoc.h>
37 #include <sys/zfs_ioctl.h>
38 #include <dlfcn.h>
39
40 #include "zfs_namecheck.h"
41 #include "zfs_prop.h"
42 #include "libzfs_impl.h"
43 #include "zfs_comutil.h"
44
45 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
46
47 #if defined(__i386) || defined(__amd64)
48 #define BOOTCMD "installgrub(1M)"
49 #else
50 #define BOOTCMD "installboot(1M)"
51 #endif
52
53 #define DISK_ROOT       "/dev/dsk"
54 #define RDISK_ROOT      "/dev/rdsk"
55 #define BACKUP_SLICE    "s2"
56
57 /*
58  * ====================================================================
59  *   zpool property functions
60  * ====================================================================
61  */
62
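/*
 * Fetch all properties for the pool from the kernel via
 * ZFS_IOC_POOL_GET_PROPS, growing the destination nvlist buffer on ENOMEM,
 * and cache the result in zhp->zpool_props.
 */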
63 static int
64 zpool_get_all_props(zpool_handle_t *zhp)
65 {
66         zfs_cmd_t zc = { 0 };
67         libzfs_handle_t *hdl = zhp->zpool_hdl;
68
69         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
70
71         if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
72                 return (-1);
73
74         while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
75                 if (errno == ENOMEM) {
76                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
77                                 zcmd_free_nvlists(&zc);
78                                 return (-1);
79                         }
80                 } else {
81                         zcmd_free_nvlists(&zc);
82                         return (-1);
83                 }
84         }
85
86         if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
87                 zcmd_free_nvlists(&zc);
88                 return (-1);
89         }
90
91         zcmd_free_nvlists(&zc);
92
93         return (0);
94 }
95
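/*
 * Re-read the pool's properties from the kernel, replacing any previously
 * cached copy.
 */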
96 static int
97 zpool_props_refresh(zpool_handle_t *zhp)
98 {
99         nvlist_t *old_props;
100
101         old_props = zhp->zpool_props;
102
103         if (zpool_get_all_props(zhp) != 0)
104                 return (-1);
105
106         nvlist_free(old_props);
107         return (0);
108 }
109
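/*
 * Return the cached string value for 'prop', falling back to the property's
 * default (or "-") when it is not present, and optionally report the value's
 * source in 'src'.
 */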
110 static char *
111 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
112     zprop_source_t *src)
113 {
114         nvlist_t *nv, *nvl;
115         uint64_t ival;
116         char *value;
117         zprop_source_t source;
118
119         nvl = zhp->zpool_props;
120         if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
121                 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
122                 source = ival;
123                 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
124         } else {
125                 source = ZPROP_SRC_DEFAULT;
126                 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
127                         value = "-";
128         }
129
130         if (src)
131                 *src = source;
132
133         return (value);
134 }
135
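/*
 * Return the numeric value for 'prop', optionally reporting its source in
 * 'src'.  Falls back to the property's default if the property list cannot
 * be read.
 */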
136 uint64_t
137 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
138 {
139         nvlist_t *nv, *nvl;
140         uint64_t value;
141         zprop_source_t source;
142
143         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
144                 /*
145                  * zpool_get_all_props() has most likely failed because
146                  * the pool is faulted, but if all we need is the top level
147                  * vdev's guid then get it from the zhp config nvlist.
148                  */
149                 if ((prop == ZPOOL_PROP_GUID) &&
150                     (nvlist_lookup_nvlist(zhp->zpool_config,
151                     ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
152                     (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
153                     == 0)) {
154                         return (value);
155                 }
156                 return (zpool_prop_default_numeric(prop));
157         }
158
159         nvl = zhp->zpool_props;
160         if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
161                 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
162                 source = value;
163                 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
164         } else {
165                 source = ZPROP_SRC_DEFAULT;
166                 value = zpool_prop_default_numeric(prop);
167         }
168
169         if (src)
170                 *src = source;
171
172         return (value);
173 }
174
175 /*
176  * Map VDEV STATE to printed strings.
177  */
178 char *
179 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
180 {
181         switch (state) {
182         case VDEV_STATE_CLOSED:
183         case VDEV_STATE_OFFLINE:
184                 return (gettext("OFFLINE"));
185         case VDEV_STATE_REMOVED:
186                 return (gettext("REMOVED"));
187         case VDEV_STATE_CANT_OPEN:
188                 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
189                         return (gettext("FAULTED"));
190                 else if (aux == VDEV_AUX_SPLIT_POOL)
191                         return (gettext("SPLIT"));
192                 else
193                         return (gettext("UNAVAIL"));
194         case VDEV_STATE_FAULTED:
195                 return (gettext("FAULTED"));
196         case VDEV_STATE_DEGRADED:
197                 return (gettext("DEGRADED"));
198         case VDEV_STATE_HEALTHY:
199                 return (gettext("ONLINE"));
200         }
201
202         return (gettext("UNKNOWN"));
203 }
204
205 /*
206  * Get a zpool property value for 'prop' and return the value in
207  * a pre-allocated buffer.
208  */
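/*
 * Illustrative use only (hypothetical caller, not part of this file; 'zhp'
 * is assumed to be an open zpool_handle_t):
 *
 *      char buf[ZFS_MAXPROPLEN];
 *
 *      if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *          NULL) == 0)
 *              (void) printf("health: %s\n", buf);
 */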
209 int
210 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
211     zprop_source_t *srctype)
212 {
213         uint64_t intval;
214         const char *strval;
215         zprop_source_t src = ZPROP_SRC_NONE;
216         nvlist_t *nvroot;
217         vdev_stat_t *vs;
218         uint_t vsc;
219
220         if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
221                 switch (prop) {
222                 case ZPOOL_PROP_NAME:
223                         (void) strlcpy(buf, zpool_get_name(zhp), len);
224                         break;
225
226                 case ZPOOL_PROP_HEALTH:
227                         (void) strlcpy(buf, "FAULTED", len);
228                         break;
229
230                 case ZPOOL_PROP_GUID:
231                         intval = zpool_get_prop_int(zhp, prop, &src);
232                         (void) snprintf(buf, len, "%llu", intval);
233                         break;
234
235                 case ZPOOL_PROP_ALTROOT:
236                 case ZPOOL_PROP_CACHEFILE:
237                         if (zhp->zpool_props != NULL ||
238                             zpool_get_all_props(zhp) == 0) {
239                                 (void) strlcpy(buf,
240                                     zpool_get_prop_string(zhp, prop, &src),
241                                     len);
242                                 if (srctype != NULL)
243                                         *srctype = src;
244                                 return (0);
245                         }
246                         /* FALLTHROUGH */
247                 default:
248                         (void) strlcpy(buf, "-", len);
249                         break;
250                 }
251
252                 if (srctype != NULL)
253                         *srctype = src;
254                 return (0);
255         }
256
257         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
258             prop != ZPOOL_PROP_NAME)
259                 return (-1);
260
261         switch (zpool_prop_get_type(prop)) {
262         case PROP_TYPE_STRING:
263                 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
264                     len);
265                 break;
266
267         case PROP_TYPE_NUMBER:
268                 intval = zpool_get_prop_int(zhp, prop, &src);
269
270                 switch (prop) {
271                 case ZPOOL_PROP_SIZE:
272                 case ZPOOL_PROP_ALLOCATED:
273                 case ZPOOL_PROP_FREE:
274                         (void) zfs_nicenum(intval, buf, len);
275                         break;
276
277                 case ZPOOL_PROP_CAPACITY:
278                         (void) snprintf(buf, len, "%llu%%",
279                             (u_longlong_t)intval);
280                         break;
281
282                 case ZPOOL_PROP_DEDUPRATIO:
283                         (void) snprintf(buf, len, "%llu.%02llux",
284                             (u_longlong_t)(intval / 100),
285                             (u_longlong_t)(intval % 100));
286                         break;
287
288                 case ZPOOL_PROP_HEALTH:
289                         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
290                             ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
291                         verify(nvlist_lookup_uint64_array(nvroot,
292                             ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
293                             == 0);
294
295                         (void) strlcpy(buf, zpool_state_to_name(intval,
296                             vs->vs_aux), len);
297                         break;
298                 default:
299                         (void) snprintf(buf, len, "%llu", intval);
300                 }
301                 break;
302
303         case PROP_TYPE_INDEX:
304                 intval = zpool_get_prop_int(zhp, prop, &src);
305                 if (zpool_prop_index_to_string(prop, intval, &strval)
306                     != 0)
307                         return (-1);
308                 (void) strlcpy(buf, strval, len);
309                 break;
310
311         default:
312                 abort();
313         }
314
315         if (srctype)
316                 *srctype = src;
317
318         return (0);
319 }
320
321 /*
322  * Check that the given bootfs value is a valid dataset name and that the
323  * dataset resides in the named pool.
324  */
325 static boolean_t
326 bootfs_name_valid(const char *pool, char *bootfs)
327 {
328         int len = strlen(pool);
329
330         if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
331                 return (B_FALSE);
332
333         if (strncmp(pool, bootfs, len) == 0 &&
334             (bootfs[len] == '/' || bootfs[len] == '\0'))
335                 return (B_TRUE);
336
337         return (B_FALSE);
338 }
339
340 /*
341  * Inspect the configuration to determine if any of the devices contain
342  * an EFI label.
343  */
344 static boolean_t
345 pool_uses_efi(nvlist_t *config)
346 {
347         nvlist_t **child;
348         uint_t c, children;
349
350         if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
351             &child, &children) != 0)
352                 return (read_efi_label(config, NULL) >= 0);
353
354         for (c = 0; c < children; c++) {
355                 if (pool_uses_efi(child[c]))
356                         return (B_TRUE);
357         }
358         return (B_FALSE);
359 }
360
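/*
 * Determine whether the pool is used for booting, i.e. whether its 'bootfs'
 * property is set to something other than "-".
 */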
361 static boolean_t
362 pool_is_bootable(zpool_handle_t *zhp)
363 {
364         char bootfs[ZPOOL_MAXNAMELEN];
365
366         return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
367             sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
368             sizeof (bootfs)) != 0);
369 }
370
371
372 /*
373  * Given an nvlist of zpool properties to be set, validate that they are
374  * correct, and parse any numeric properties (index, boolean, etc) if they are
375  * specified as strings.
376  */
377 static nvlist_t *
378 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
379     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
380 {
381         nvpair_t *elem;
382         nvlist_t *retprops;
383         zpool_prop_t prop;
384         char *strval;
385         uint64_t intval;
386         char *slash;
387         struct stat64 statbuf;
388         zpool_handle_t *zhp;
389         nvlist_t *nvroot;
390
391         if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
392                 (void) no_memory(hdl);
393                 return (NULL);
394         }
395
396         elem = NULL;
397         while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
398                 const char *propname = nvpair_name(elem);
399
400                 /*
401                  * Make sure this property is valid and applies to this type.
402                  */
403                 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
404                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
405                             "invalid property '%s'"), propname);
406                         (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
407                         goto error;
408                 }
409
410                 if (zpool_prop_readonly(prop)) {
411                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
412                             "is readonly"), propname);
413                         (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
414                         goto error;
415                 }
416
417                 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
418                     &strval, &intval, errbuf) != 0)
419                         goto error;
420
421                 /*
422                  * Perform additional checking for specific properties.
423                  */
424                 switch (prop) {
425                 case ZPOOL_PROP_VERSION:
426                         if (intval < version || intval > SPA_VERSION) {
427                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
428                                     "property '%s' number %llu is invalid."),
429                                     propname, (u_longlong_t)intval);
430                                 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
431                                 goto error;
432                         }
433                         break;
434
435                 case ZPOOL_PROP_BOOTFS:
436                         if (create_or_import) {
437                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
438                                     "property '%s' cannot be set at creation "
439                                     "or import time"), propname);
440                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
441                                 goto error;
442                         }
443
444                         if (version < SPA_VERSION_BOOTFS) {
445                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
446                                     "pool must be upgraded to support "
447                                     "'%s' property"), propname);
448                                 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
449                                 goto error;
450                         }
451
452                         /*
453                          * bootfs property value has to be a dataset name and
454                          * the dataset has to be in the same pool it is being set on.
455                          */
456                         if (strval[0] != '\0' && !bootfs_name_valid(poolname,
457                             strval)) {
458                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
459                                     "is an invalid name"), strval);
460                                 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
461                                 goto error;
462                         }
463
464                         if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
465                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
466                                     "could not open pool '%s'"), poolname);
467                                 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
468                                 goto error;
469                         }
470                         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
471                             ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
472
473                         /*
474                          * bootfs property cannot be set on a disk which has
475                          * been EFI labeled.
476                          */
477                         if (pool_uses_efi(nvroot)) {
478                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
479                                     "property '%s' not supported on "
480                                     "EFI labeled devices"), propname);
481                                 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
482                                 zpool_close(zhp);
483                                 goto error;
484                         }
485                         zpool_close(zhp);
486                         break;
487
488                 case ZPOOL_PROP_ALTROOT:
489                         if (!create_or_import) {
490                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
491                                     "property '%s' can only be set during pool "
492                                     "creation or import"), propname);
493                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
494                                 goto error;
495                         }
496
497                         if (strval[0] != '/') {
498                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
499                                     "bad alternate root '%s'"), strval);
500                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
501                                 goto error;
502                         }
503                         break;
504
505                 case ZPOOL_PROP_CACHEFILE:
506                         if (strval[0] == '\0')
507                                 break;
508
509                         if (strcmp(strval, "none") == 0)
510                                 break;
511
512                         if (strval[0] != '/') {
513                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
514                                     "property '%s' must be empty, an "
515                                     "absolute path, or 'none'"), propname);
516                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
517                                 goto error;
518                         }
519
520                         slash = strrchr(strval, '/');
521
522                         if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
523                             strcmp(slash, "/..") == 0) {
524                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525                                     "'%s' is not a valid file"), strval);
526                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
527                                 goto error;
528                         }
529
530                         *slash = '\0';
531
532                         if (strval[0] != '\0' &&
533                             (stat64(strval, &statbuf) != 0 ||
534                             !S_ISDIR(statbuf.st_mode))) {
535                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
536                                     "'%s' is not a valid directory"),
537                                     strval);
538                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
539                                 goto error;
540                         }
541
542                         *slash = '/';
543                         break;
544                 }
545         }
546
547         return (retprops);
548 error:
549         nvlist_free(retprops);
550         return (NULL);
551 }
552
553 /*
554  * Set zpool property : propname=propval.
555  */
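/*
 * Illustrative use only (hypothetical caller; "autoreplace" is merely an
 * example property name):
 *
 *      if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *              return (-1);
 *
 * Errors are reported through the pool's libzfs handle.
 */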
556 int
557 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
558 {
559         zfs_cmd_t zc = { 0 };
560         int ret = -1;
561         char errbuf[1024];
562         nvlist_t *nvl = NULL;
563         nvlist_t *realprops;
564         uint64_t version;
565
566         (void) snprintf(errbuf, sizeof (errbuf),
567             dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
568             zhp->zpool_name);
569
570         if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
571                 return (no_memory(zhp->zpool_hdl));
572
573         if (nvlist_add_string(nvl, propname, propval) != 0) {
574                 nvlist_free(nvl);
575                 return (no_memory(zhp->zpool_hdl));
576         }
577
578         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
579         if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
580             zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
581                 nvlist_free(nvl);
582                 return (-1);
583         }
584
585         nvlist_free(nvl);
586         nvl = realprops;
587
588         /*
589          * Execute the corresponding ioctl() to set this property.
590          */
591         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
592
593         if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
594                 nvlist_free(nvl);
595                 return (-1);
596         }
597
598         ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
599
600         zcmd_free_nvlists(&zc);
601         nvlist_free(nvl);
602
603         if (ret)
604                 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
605         else
606                 (void) zpool_props_refresh(zhp);
607
608         return (ret);
609 }
610
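/*
 * Expand the given property list for display and update the column width of
 * each non-fixed entry to fit its current value.
 */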
611 int
612 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
613 {
614         libzfs_handle_t *hdl = zhp->zpool_hdl;
615         zprop_list_t *entry;
616         char buf[ZFS_MAXPROPLEN];
617
618         if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
619                 return (-1);
620
621         for (entry = *plp; entry != NULL; entry = entry->pl_next) {
622
623                 if (entry->pl_fixed)
624                         continue;
625
626                 if (entry->pl_prop != ZPROP_INVAL &&
627                     zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
628                     NULL) == 0) {
629                         if (strlen(buf) > entry->pl_width)
630                                 entry->pl_width = strlen(buf);
631                 }
632         }
633
634         return (0);
635 }
636
637
638 /*
639  * Don't start the slice at the default block of 34; many storage
640  * devices will use a stripe width of 128k, so start there instead.
641  */
642 #define NEW_START_BLOCK 256
643
644 /*
645  * Validate the given pool name, optionally reporting an extended error
646  * message via 'hdl'.
647  */
648 boolean_t
649 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
650 {
651         namecheck_err_t why;
652         char what;
653         int ret;
654
655         ret = pool_namecheck(pool, &why, &what);
656
657         /*
658          * The rules for reserved pool names were extended at a later point.
659          * But we need to support users with existing pools that may now be
660          * invalid.  So we only check for this expanded set of names during a
661          * create (or import), and only in userland.
662          */
663         if (ret == 0 && !isopen &&
664             (strncmp(pool, "mirror", 6) == 0 ||
665             strncmp(pool, "raidz", 5) == 0 ||
666             strncmp(pool, "spare", 5) == 0 ||
667             strcmp(pool, "log") == 0)) {
668                 if (hdl != NULL)
669                         zfs_error_aux(hdl,
670                             dgettext(TEXT_DOMAIN, "name is reserved"));
671                 return (B_FALSE);
672         }
673
674
675         if (ret != 0) {
676                 if (hdl != NULL) {
677                         switch (why) {
678                         case NAME_ERR_TOOLONG:
679                                 zfs_error_aux(hdl,
680                                     dgettext(TEXT_DOMAIN, "name is too long"));
681                                 break;
682
683                         case NAME_ERR_INVALCHAR:
684                                 zfs_error_aux(hdl,
685                                     dgettext(TEXT_DOMAIN, "invalid character "
686                                     "'%c' in pool name"), what);
687                                 break;
688
689                         case NAME_ERR_NOLETTER:
690                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
691                                     "name must begin with a letter"));
692                                 break;
693
694                         case NAME_ERR_RESERVED:
695                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
696                                     "name is reserved"));
697                                 break;
698
699                         case NAME_ERR_DISKLIKE:
700                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
701                                     "pool name is reserved"));
702                                 break;
703
704                         case NAME_ERR_LEADING_SLASH:
705                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
706                                     "leading slash in name"));
707                                 break;
708
709                         case NAME_ERR_EMPTY_COMPONENT:
710                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
711                                     "empty component in name"));
712                                 break;
713
714                         case NAME_ERR_TRAILING_SLASH:
715                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
716                                     "trailing slash in name"));
717                                 break;
718
719                         case NAME_ERR_MULTIPLE_AT:
720                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
721                                     "multiple '@' delimiters in name"));
722                                 break;
723
724                         }
725                 }
726                 return (B_FALSE);
727         }
728
729         return (B_TRUE);
730 }
731
732 /*
733  * Open a handle to the given pool, even if the pool is currently in the FAULTED
734  * state.
735  */
736 zpool_handle_t *
737 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
738 {
739         zpool_handle_t *zhp;
740         boolean_t missing;
741
742         /*
743          * Make sure the pool name is valid.
744          */
745         if (!zpool_name_valid(hdl, B_TRUE, pool)) {
746                 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
747                     dgettext(TEXT_DOMAIN, "cannot open '%s'"),
748                     pool);
749                 return (NULL);
750         }
751
752         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
753                 return (NULL);
754
755         zhp->zpool_hdl = hdl;
756         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
757
758         if (zpool_refresh_stats(zhp, &missing) != 0) {
759                 zpool_close(zhp);
760                 return (NULL);
761         }
762
763         if (missing) {
764                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
765                 (void) zfs_error_fmt(hdl, EZFS_NOENT,
766                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
767                 zpool_close(zhp);
768                 return (NULL);
769         }
770
771         return (zhp);
772 }
773
774 /*
775  * Like the above, but silent on error.  Used when iterating over pools (because
776  * the configuration cache may be out of date).
777  */
778 int
779 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
780 {
781         zpool_handle_t *zhp;
782         boolean_t missing;
783
784         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
785                 return (-1);
786
787         zhp->zpool_hdl = hdl;
788         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
789
790         if (zpool_refresh_stats(zhp, &missing) != 0) {
791                 zpool_close(zhp);
792                 return (-1);
793         }
794
795         if (missing) {
796                 zpool_close(zhp);
797                 *ret = NULL;
798                 return (0);
799         }
800
801         *ret = zhp;
802         return (0);
803 }
804
805 /*
806  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
807  * state.
808  */
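/*
 * Illustrative use only (hypothetical caller; 'hdl' is assumed to be a
 * libzfs handle obtained from libzfs_init() and "tank" an existing pool):
 *
 *      zpool_handle_t *zhp;
 *
 *      if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *              ... operate on the pool ...
 *              zpool_close(zhp);
 *      }
 */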
809 zpool_handle_t *
810 zpool_open(libzfs_handle_t *hdl, const char *pool)
811 {
812         zpool_handle_t *zhp;
813
814         if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
815                 return (NULL);
816
817         if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
818                 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
819                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
820                 zpool_close(zhp);
821                 return (NULL);
822         }
823
824         return (zhp);
825 }
826
827 /*
828  * Close the handle.  Simply frees the memory associated with the handle.
829  */
830 void
831 zpool_close(zpool_handle_t *zhp)
832 {
833         if (zhp->zpool_config)
834                 nvlist_free(zhp->zpool_config);
835         if (zhp->zpool_old_config)
836                 nvlist_free(zhp->zpool_old_config);
837         if (zhp->zpool_props)
838                 nvlist_free(zhp->zpool_props);
839         free(zhp);
840 }
841
842 /*
843  * Return the name of the pool.
844  */
845 const char *
846 zpool_get_name(zpool_handle_t *zhp)
847 {
848         return (zhp->zpool_name);
849 }
850
851
852 /*
853  * Return the state of the pool (ACTIVE or UNAVAILABLE)
854  */
855 int
856 zpool_get_state(zpool_handle_t *zhp)
857 {
858         return (zhp->zpool_state);
859 }
860
861 /*
862  * Create the named pool, using the provided vdev list.  It is assumed
863  * that the consumer has already validated the contents of the nvlist, so we
864  * don't have to worry about error semantics.
865  */
866 int
867 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
868     nvlist_t *props, nvlist_t *fsprops)
869 {
870         zfs_cmd_t zc = { 0 };
871         nvlist_t *zc_fsprops = NULL;
872         nvlist_t *zc_props = NULL;
873         char msg[1024];
874         char *altroot;
875         int ret = -1;
876
877         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
878             "cannot create '%s'"), pool);
879
880         if (!zpool_name_valid(hdl, B_FALSE, pool))
881                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
882
883         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
884                 return (-1);
885
886         if (props) {
887                 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
888                     SPA_VERSION_1, B_TRUE, msg)) == NULL) {
889                         goto create_failed;
890                 }
891         }
892
893         if (fsprops) {
894                 uint64_t zoned;
895                 char *zonestr;
896
897                 zoned = ((nvlist_lookup_string(fsprops,
898                     zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
899                     strcmp(zonestr, "on") == 0);
900
901                 if ((zc_fsprops = zfs_valid_proplist(hdl,
902                     ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
903                         goto create_failed;
904                 }
905                 if (!zc_props &&
906                     (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
907                         goto create_failed;
908                 }
909                 if (nvlist_add_nvlist(zc_props,
910                     ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
911                         goto create_failed;
912                 }
913         }
914
915         if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
916                 goto create_failed;
917
918         (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
919
920         if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
921
922                 zcmd_free_nvlists(&zc);
923                 nvlist_free(zc_props);
924                 nvlist_free(zc_fsprops);
925
926                 switch (errno) {
927                 case EBUSY:
928                         /*
929                          * This can happen if the user has specified the same
930                          * device multiple times.  We can't reliably detect this
931                          * until we try to add it and see we already have a
932                          * label.
933                          */
934                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
935                             "one or more vdevs refer to the same device"));
936                         return (zfs_error(hdl, EZFS_BADDEV, msg));
937
938                 case EOVERFLOW:
939                         /*
940                          * This occurs when one of the devices is below
941                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
942                          * device was the problem device since there's no
943                          * reliable way to determine device size from userland.
944                          */
945                         {
946                                 char buf[64];
947
948                                 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
949
950                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
951                                     "one or more devices is less than the "
952                                     "minimum size (%s)"), buf);
953                         }
954                         return (zfs_error(hdl, EZFS_BADDEV, msg));
955
956                 case ENOSPC:
957                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
958                             "one or more devices is out of space"));
959                         return (zfs_error(hdl, EZFS_BADDEV, msg));
960
961                 case ENOTBLK:
962                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
963                             "cache device must be a disk or disk slice"));
964                         return (zfs_error(hdl, EZFS_BADDEV, msg));
965
966                 default:
967                         return (zpool_standard_error(hdl, errno, msg));
968                 }
969         }
970
971         /*
972          * If this is an alternate root pool, then we automatically set the
973          * mountpoint of the root dataset to be '/'.
974          */
975         if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
976             &altroot) == 0) {
977                 zfs_handle_t *zhp;
978
979                 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
980                 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
981                     "/") == 0);
982
983                 zfs_close(zhp);
984         }
985
986 create_failed:
987         zcmd_free_nvlists(&zc);
988         nvlist_free(zc_props);
989         nvlist_free(zc_fsprops);
990         return (ret);
991 }
992
993 /*
994  * Destroy the given pool.  It is up to the caller to ensure that there are no
995  * datasets left in the pool.
996  */
997 int
998 zpool_destroy(zpool_handle_t *zhp)
999 {
1000         zfs_cmd_t zc = { 0 };
1001         zfs_handle_t *zfp = NULL;
1002         libzfs_handle_t *hdl = zhp->zpool_hdl;
1003         char msg[1024];
1004
1005         if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1006             (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1007             ZFS_TYPE_FILESYSTEM)) == NULL)
1008                 return (-1);
1009
1010         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1011
1012         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1013                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1014                     "cannot destroy '%s'"), zhp->zpool_name);
1015
1016                 if (errno == EROFS) {
1017                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1018                             "one or more devices is read only"));
1019                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1020                 } else {
1021                         (void) zpool_standard_error(hdl, errno, msg);
1022                 }
1023
1024                 if (zfp)
1025                         zfs_close(zfp);
1026                 return (-1);
1027         }
1028
1029         if (zfp) {
1030                 remove_mountpoint(zfp);
1031                 zfs_close(zfp);
1032         }
1033
1034         return (0);
1035 }
1036
1037 /*
1038  * Add the given vdevs to the pool.  The caller must have already performed the
1039  * necessary verification to ensure that the vdev specification is well-formed.
1040  */
1041 int
1042 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1043 {
1044         zfs_cmd_t zc = { 0 };
1045         int ret;
1046         libzfs_handle_t *hdl = zhp->zpool_hdl;
1047         char msg[1024];
1048         nvlist_t **spares, **l2cache;
1049         uint_t nspares, nl2cache;
1050
1051         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1052             "cannot add to '%s'"), zhp->zpool_name);
1053
1054         if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1055             SPA_VERSION_SPARES &&
1056             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1057             &spares, &nspares) == 0) {
1058                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1059                     "upgraded to add hot spares"));
1060                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1061         }
1062
1063         if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1064             ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1065                 uint64_t s;
1066
1067                 for (s = 0; s < nspares; s++) {
1068                         char *path;
1069
1070                         if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1071                             &path) == 0 && pool_uses_efi(spares[s])) {
1072                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1073                                     "device '%s' contains an EFI label and "
1074                                     "cannot be used on root pools."),
1075                                     zpool_vdev_name(hdl, NULL, spares[s],
1076                                     B_FALSE));
1077                                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1078                         }
1079                 }
1080         }
1081
1082         if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1083             SPA_VERSION_L2CACHE &&
1084             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1085             &l2cache, &nl2cache) == 0) {
1086                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1087                     "upgraded to add cache devices"));
1088                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1089         }
1090
1091         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1092                 return (-1);
1093         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1094
1095         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1096                 switch (errno) {
1097                 case EBUSY:
1098                         /*
1099                          * This can happen if the user has specified the same
1100                          * device multiple times.  We can't reliably detect this
1101                          * until we try to add it and see we already have a
1102                          * label.
1103                          */
1104                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1105                             "one or more vdevs refer to the same device"));
1106                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1107                         break;
1108
1109                 case EOVERFLOW:
1110                         /*
1111                          * This occurs when one of the devices is below
1112                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1113                          * device was the problem device since there's no
1114                          * reliable way to determine device size from userland.
1115                          */
1116                         {
1117                                 char buf[64];
1118
1119                                 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1120
1121                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1122                                     "device is less than the minimum "
1123                                     "size (%s)"), buf);
1124                         }
1125                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1126                         break;
1127
1128                 case ENOTSUP:
1129                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1130                             "pool must be upgraded to add these vdevs"));
1131                         (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1132                         break;
1133
1134                 case EDOM:
1135                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1136                             "root pool can not have multiple vdevs"
1137                             " or separate logs"));
1138                         (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1139                         break;
1140
1141                 case ENOTBLK:
1142                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1143                             "cache device must be a disk or disk slice"));
1144                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1145                         break;
1146
1147                 default:
1148                         (void) zpool_standard_error(hdl, errno, msg);
1149                 }
1150
1151                 ret = -1;
1152         } else {
1153                 ret = 0;
1154         }
1155
1156         zcmd_free_nvlists(&zc);
1157
1158         return (ret);
1159 }
1160
1161 /*
1162  * Exports the pool from the system.  The caller must ensure that there are no
1163  * mounted datasets in the pool.
1164  */
1165 int
1166 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1167 {
1168         zfs_cmd_t zc = { 0 };
1169         char msg[1024];
1170
1171         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1172             "cannot export '%s'"), zhp->zpool_name);
1173
1174         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1175         zc.zc_cookie = force;
1176         zc.zc_guid = hardforce;
1177
1178         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1179                 switch (errno) {
1180                 case EXDEV:
1181                         zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1182                             "use '-f' to override the following errors:\n"
1183                             "'%s' has an active shared spare which could be"
1184                             " used by other pools once '%s' is exported."),
1185                             zhp->zpool_name, zhp->zpool_name);
1186                         return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1187                             msg));
1188                 default:
1189                         return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1190                             msg));
1191                 }
1192         }
1193
1194         return (0);
1195 }
1196
1197 int
1198 zpool_export(zpool_handle_t *zhp, boolean_t force)
1199 {
1200         return (zpool_export_common(zhp, force, B_FALSE));
1201 }
1202
1203 int
1204 zpool_export_force(zpool_handle_t *zhp)
1205 {
1206         return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1207 }
1208
1209 static void
1210 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1211     nvlist_t *rbi)
1212 {
1213         uint64_t rewindto;
1214         int64_t loss = -1;
1215         struct tm t;
1216         char timestr[128];
1217
1218         if (!hdl->libzfs_printerr || rbi == NULL)
1219                 return;
1220
1221         if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1222                 return;
1223         (void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss);
1224
1225         if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1226             strftime(timestr, 128, 0, &t) != 0) {
1227                 if (dryrun) {
1228                         (void) printf(dgettext(TEXT_DOMAIN,
1229                             "Would be able to return %s "
1230                             "to its state as of %s.\n"),
1231                             name, timestr);
1232                 } else {
1233                         (void) printf(dgettext(TEXT_DOMAIN,
1234                             "Pool %s returned to its state as of %s.\n"),
1235                             name, timestr);
1236                 }
1237                 if (loss > 120) {
1238                         (void) printf(dgettext(TEXT_DOMAIN,
1239                             "%s approximately %lld "),
1240                             dryrun ? "Would discard" : "Discarded",
1241                             (loss + 30) / 60);
1242                         (void) printf(dgettext(TEXT_DOMAIN,
1243                             "minutes of transactions.\n"));
1244                 } else if (loss > 0) {
1245                         (void) printf(dgettext(TEXT_DOMAIN,
1246                             "%s approximately %lld "),
1247                             dryrun ? "Would discard" : "Discarded", loss);
1248                         (void) printf(dgettext(TEXT_DOMAIN,
1249                             "seconds of transactions.\n"));
1250                 }
1251         }
1252 }
1253
1254 void
1255 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1256     nvlist_t *config)
1257 {
1258         int64_t loss = -1;
1259         uint64_t edata = UINT64_MAX;
1260         uint64_t rewindto;
1261         struct tm t;
1262         char timestr[128];
1263
1264         if (!hdl->libzfs_printerr)
1265                 return;
1266
1267         if (reason >= 0)
1268                 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1269         else
1270                 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1271
1272         /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1273         if (nvlist_lookup_uint64(config,
1274             ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1275                 goto no_info;
1276
1277         (void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss);
1278         (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1279             &edata);
1280
1281         (void) printf(dgettext(TEXT_DOMAIN,
1282             "Recovery is possible, but will result in some data loss.\n"));
1283
1284         if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1285             strftime(timestr, 128, 0, &t) != 0) {
1286                 (void) printf(dgettext(TEXT_DOMAIN,
1287                     "\tReturning the pool to its state as of %s\n"
1288                     "\tshould correct the problem.  "),
1289                     timestr);
1290         } else {
1291                 (void) printf(dgettext(TEXT_DOMAIN,
1292                     "\tReverting the pool to an earlier state "
1293                     "should correct the problem.\n\t"));
1294         }
1295
1296         if (loss > 120) {
1297                 (void) printf(dgettext(TEXT_DOMAIN,
1298                     "Approximately %lld minutes of data\n"
1299                     "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
1300         } else if (loss > 0) {
1301                 (void) printf(dgettext(TEXT_DOMAIN,
1302                     "Approximately %lld seconds of data\n"
1303                     "\tmust be discarded, irreversibly.  "), loss);
1304         }
1305         if (edata != 0 && edata != UINT64_MAX) {
1306                 if (edata == 1) {
1307                         (void) printf(dgettext(TEXT_DOMAIN,
1308                             "After rewind, at least\n"
1309                             "\tone persistent user-data error will remain.  "));
1310                 } else {
1311                         (void) printf(dgettext(TEXT_DOMAIN,
1312                             "After rewind, several\n"
1313                             "\tpersistent user-data errors will remain.  "));
1314                 }
1315         }
1316         (void) printf(dgettext(TEXT_DOMAIN,
1317             "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
1318             reason >= 0 ? "clear" : "import", name);
1319
1320         (void) printf(dgettext(TEXT_DOMAIN,
1321             "A scrub of the pool\n"
1322             "\tis strongly recommended after recovery.\n"));
1323         return;
1324
1325 no_info:
1326         (void) printf(dgettext(TEXT_DOMAIN,
1327             "Destroy and re-create the pool from\n\ta backup source.\n"));
1328 }
1329
1330 /*
1331  * zpool_import() is a contracted interface. Should be kept the same
1332  * if possible.
1333  *
1334  * Applications should use zpool_import_props() to import a pool with
1335  * new property values to be set.
1336  */
1337 int
1338 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1339     char *altroot)
1340 {
1341         nvlist_t *props = NULL;
1342         int ret;
1343
1344         if (altroot != NULL) {
1345                 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1346                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1347                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1348                             newname));
1349                 }
1350
1351                 if (nvlist_add_string(props,
1352                     zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1353                     nvlist_add_string(props,
1354                     zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1355                         nvlist_free(props);
1356                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1357                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1358                             newname));
1359                 }
1360         }
1361
1362         ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1363         if (props)
1364                 nvlist_free(props);
1365         return (ret);
1366 }
1367
1368 /*
1369  * Import the given pool using the known configuration and a list of
1370  * properties to be set. The configuration should have come from
1371  * zpool_find_import(). The 'newname' parameter controls whether the pool
1372  * is imported with a different name.
1373  */
1374 int
1375 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1376     nvlist_t *props, boolean_t importfaulted)
1377 {
1378         zfs_cmd_t zc = { 0 };
1379         zpool_rewind_policy_t policy;
1380         nvlist_t *nvi = NULL;
1381         char *thename;
1382         char *origname;
1383         uint64_t returned_size;
1384         int ret;
1385         char errbuf[1024];
1386
1387         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1388             &origname) == 0);
1389
1390         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1391             "cannot import pool '%s'"), origname);
1392
1393         if (newname != NULL) {
1394                 if (!zpool_name_valid(hdl, B_FALSE, newname))
1395                         return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1396                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1397                             newname));
1398                 thename = (char *)newname;
1399         } else {
1400                 thename = origname;
1401         }
1402
1403         if (props) {
1404                 uint64_t version;
1405
1406                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1407                     &version) == 0);
1408
1409                 if ((props = zpool_valid_proplist(hdl, origname,
1410                     props, version, B_TRUE, errbuf)) == NULL) {
1411                         return (-1);
1412                 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1413                         nvlist_free(props);
1414                         return (-1);
1415                 }
1416         }
1417
1418         (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1419
1420         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1421             &zc.zc_guid) == 0);
1422
1423         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1424                 nvlist_free(props);
1425                 return (-1);
1426         }
1427         returned_size =  zc.zc_nvlist_conf_size + 512;
1428         if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) {
1429                 nvlist_free(props);
1430                 return (-1);
1431         }
1432
1433         zc.zc_cookie = (uint64_t)importfaulted;
1434         ret = 0;
1435         if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1436                 char desc[1024];
1437
1438                 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1439                 zpool_get_rewind_policy(config, &policy);
1440                 /*
1441                  * Dry-run failed, but we print out what success
1442                  * looks like if we found a best txg.
1443                  */
1444                 if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) {
1445                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1446                             B_TRUE, nvi);
1447                         nvlist_free(nvi);
1448                         return (-1);
1449                 }
1450
1451                 if (newname == NULL)
1452                         (void) snprintf(desc, sizeof (desc),
1453                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1454                             thename);
1455                 else
1456                         (void) snprintf(desc, sizeof (desc),
1457                             dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1458                             origname, thename);
1459
1460                 switch (errno) {
1461                 case ENOTSUP:
1462                         /*
1463                          * Unsupported version.
1464                          */
1465                         (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1466                         break;
1467
1468                 case EINVAL:
1469                         (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1470                         break;
1471
1472                 case EROFS:
1473                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1474                             "one or more devices is read only"));
1475                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
1476                         break;
1477
1478                 default:
1479                         (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1480                         (void) zpool_standard_error(hdl, errno, desc);
1481                         zpool_explain_recover(hdl,
1482                             newname ? origname : thename, -errno, nvi);
1483                         nvlist_free(nvi);
1484                         break;
1485                 }
1486
1487                 ret = -1;
1488         } else {
1489                 zpool_handle_t *zhp;
1490
1491                 /*
1492                  * This should never fail, but play it safe anyway.
1493                  */
1494                 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1495                         ret = -1;
1496                 else if (zhp != NULL)
1497                         zpool_close(zhp);
1498                 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1499                 zpool_get_rewind_policy(config, &policy);
1500                 if (policy.zrp_request &
1501                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1502                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1503                             ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
1504                             nvi);
1505                 }
1506                 nvlist_free(nvi);
1507                 return (0);
1508         }
1509
1510         zcmd_free_nvlists(&zc);
1511         nvlist_free(props);
1512
1513         return (ret);
1514 }
1515
1516 /*
1517  * Scan the pool.
1518  */
1519 int
1520 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1521 {
1522         zfs_cmd_t zc = { 0 };
1523         char msg[1024];
1524         libzfs_handle_t *hdl = zhp->zpool_hdl;
1525
1526         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1527         zc.zc_cookie = func;
1528
1529         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1530             (errno == ENOENT && func != POOL_SCAN_NONE))
1531                 return (0);
1532
1533         if (func == POOL_SCAN_SCRUB) {
1534                 (void) snprintf(msg, sizeof (msg),
1535                     dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1536         } else if (func == POOL_SCAN_NONE) {
1537                 (void) snprintf(msg, sizeof (msg),
1538                     dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1539                     zc.zc_name);
1540         } else {
1541                 assert(!"unexpected result");
1542         }
1543
1544         if (errno == EBUSY) {
1545                 nvlist_t *nvroot;
1546                 pool_scan_stat_t *ps = NULL;
1547                 uint_t psc;
1548
1549                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1550                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1551                 (void) nvlist_lookup_uint64_array(nvroot,
1552                     ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1553                 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1554                         return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1555                 else
1556                         return (zfs_error(hdl, EZFS_RESILVERING, msg));
1557         } else if (errno == ENOENT) {
1558                 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1559         } else {
1560                 return (zpool_standard_error(hdl, errno, msg));
1561         }
1562 }
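
/*
 * Illustrative sketch (not part of this library): how a caller might use
 * zpool_scan() above to request a scrub.  The pool name "tank" and the
 * surrounding error handling are assumptions made for the example only.
 *
 *	libzfs_handle_t *g_zfs;
 *	zpool_handle_t *zhp;
 *
 *	if ((g_zfs = libzfs_init()) == NULL)
 *		return (1);
 *	if ((zhp = zpool_open(g_zfs, "tank")) != NULL) {
 *		if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *			(void) fprintf(stderr, "scrub request failed\n");
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(g_zfs);
 */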
1563
1564 /*
1565  * This provides a very minimal check whether a given string is likely a
1566  * c#t#d# style string.  Users of this are expected to do their own
1567  * verification of the s# part.
1568  */
1569 #define CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))
1570
1571 /*
1572  * More elaborate version for ones which may start with "/dev/dsk/"
1573  * and the like.
1574  */
1575 static int
1576 ctd_check_path(char *str) {
1577         /*
1578          * If it starts with a slash, check the last component.
1579          */
1580         if (str && str[0] == '/') {
1581                 char *tmp = strrchr(str, '/');
1582
1583                 /*
1584                  * If it ends in "/old", check the second-to-last
1585                  * component of the string instead.
1586                  */
1587                 if (tmp != str && strcmp(tmp, "/old") == 0) {
1588                         for (tmp--; *tmp != '/'; tmp--)
1589                                 ;
1590                 }
1591                 str = tmp + 1;
1592         }
1593         return (CTD_CHECK(str));
1594 }
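
/*
 * For illustration, assuming typical Solaris device naming, the check above
 * accepts strings such as:
 *
 *	"c0t0d0s0"			plain c#t#d# name
 *	"/dev/dsk/c1t2d0s0"		last component is c#t#d#
 *	"/dev/dsk/c1t2d0s0/old"		ends in "/old", so the second-to-last
 *					component is checked instead
 *
 * and rejects names such as "sd0" or "/dev/dsk/emcpower0a", which do not
 * begin with 'c' followed by a digit.
 */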
1595
1596 /*
1597  * Find a vdev that matches the specified search criteria.  We use the
1598  * nvpair name to determine how we should look for the device.
1599  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1600  * spare, but FALSE if it is an INUSE spare.
1601  */
1602 static nvlist_t *
1603 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1604     boolean_t *l2cache, boolean_t *log)
1605 {
1606         uint_t c, children;
1607         nvlist_t **child;
1608         nvlist_t *ret;
1609         uint64_t is_log;
1610         char *srchkey;
1611         nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1612
1613         /* Nothing to look for */
1614         if (search == NULL || pair == NULL)
1615                 return (NULL);
1616
1617         /* Obtain the key we will use to search */
1618         srchkey = nvpair_name(pair);
1619
1620         switch (nvpair_type(pair)) {
1621         case DATA_TYPE_UINT64: {
1622                 uint64_t srchval, theguid, present;
1623
1624                 verify(nvpair_value_uint64(pair, &srchval) == 0);
1625                 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1626                         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1627                             &present) == 0) {
1628                                 /*
1629                                  * If the device has never been present since
1630                                  * import, the only reliable way to match the
1631                                  * vdev is by GUID.
1632                                  */
1633                                 verify(nvlist_lookup_uint64(nv,
1634                                     ZPOOL_CONFIG_GUID, &theguid) == 0);
1635                                 if (theguid == srchval)
1636                                         return (nv);
1637                         }
1638                 }
1639                 break;
1640         }
1641
1642         case DATA_TYPE_STRING: {
1643                 char *srchval, *val;
1644
1645                 verify(nvpair_value_string(pair, &srchval) == 0);
1646                 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1647                         break;
1648
1649                 /*
1650                  * Search for the requested value. Special cases:
1651                  *
1652                  * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
1653                  *   "s0" or "s0/old".  The "s0" part is hidden from the user,
1654                  *   but included in the string, so this matches around it.
1655                  * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1656                  *
1657                  * Otherwise, all other searches are simple string compares.
1658                  */
1659                 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
1660                     ctd_check_path(val)) {
1661                         uint64_t wholedisk = 0;
1662
1663                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1664                             &wholedisk);
1665                         if (wholedisk) {
1666                                 int slen = strlen(srchval);
1667                                 int vlen = strlen(val);
1668
1669                                 if (slen != vlen - 2)
1670                                         break;
1671
1672                                 /*
1673                                  * make_leaf_vdev() should only set
1674                                  * wholedisk for ZPOOL_CONFIG_PATHs which
1675                                  * will include "/dev/dsk/", giving plenty of
1676                                  * room for the indices used next.
1677                                  */
1678                                 ASSERT(vlen >= 6);
1679
1680                                 /*
1681                                  * strings identical except trailing "s0"
1682                                  */
1683                                 if (strcmp(&val[vlen - 2], "s0") == 0 &&
1684                                     strncmp(srchval, val, slen) == 0)
1685                                         return (nv);
1686
1687                                 /*
1688                                  * strings identical except trailing "s0/old"
1689                                  */
1690                                 if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
1691                                     strcmp(&srchval[slen - 4], "/old") == 0 &&
1692                                     strncmp(srchval, val, slen - 4) == 0)
1693                                         return (nv);
1694
1695                                 break;
1696                         }
1697                 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1698                         char *type, *idx, *end, *p;
1699                         uint64_t id, vdev_id;
1700
1701                         /*
1702                          * Determine our vdev type, keeping in mind
1703                          * that the srchval is composed of a type and
1704                          * vdev id pair (i.e. mirror-4).
1705                          */
1706                         if ((type = strdup(srchval)) == NULL)
1707                                 return (NULL);
1708
1709                         if ((p = strrchr(type, '-')) == NULL) {
1710                                 free(type);
1711                                 break;
1712                         }
1713                         idx = p + 1;
1714                         *p = '\0';
1715
1716                         /*
1717                          * If the types don't match then keep looking.
1718                          */
1719                         if (strncmp(val, type, strlen(val)) != 0) {
1720                                 free(type);
1721                                 break;
1722                         }
1723
1724                         verify(strncmp(type, VDEV_TYPE_RAIDZ,
1725                             strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1726                             strncmp(type, VDEV_TYPE_MIRROR,
1727                             strlen(VDEV_TYPE_MIRROR)) == 0);
1728                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1729                             &id) == 0);
1730
1731                         errno = 0;
1732                         vdev_id = strtoull(idx, &end, 10);
1733
1734                         free(type);
1735                         if (errno != 0)
1736                                 return (NULL);
1737
1738                         /*
1739                          * Now verify that we have the correct vdev id.
1740                          */
1741                         if (vdev_id == id)
1742                                 return (nv);
1743                 }
1744
1745                 /*
1746                  * Common case
1747                  */
1748                 if (strcmp(srchval, val) == 0)
1749                         return (nv);
1750                 break;
1751         }
1752
1753         default:
1754                 break;
1755         }
1756
1757         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1758             &child, &children) != 0)
1759                 return (NULL);
1760
1761         for (c = 0; c < children; c++) {
1762                 if ((ret = vdev_to_nvlist_iter(child[c], search,
1763                     avail_spare, l2cache, NULL)) != NULL) {
1764                         /*
1765                          * The 'is_log' value is only set for the toplevel
1766                          * vdev, not the leaf vdevs.  So we always lookup the
1767                          * log device from the root of the vdev tree (where
1768                          * 'log' is non-NULL).
1769                          */
1770                         if (log != NULL &&
1771                             nvlist_lookup_uint64(child[c],
1772                             ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1773                             is_log) {
1774                                 *log = B_TRUE;
1775                         }
1776                         return (ret);
1777                 }
1778         }
1779
1780         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1781             &child, &children) == 0) {
1782                 for (c = 0; c < children; c++) {
1783                         if ((ret = vdev_to_nvlist_iter(child[c], search,
1784                             avail_spare, l2cache, NULL)) != NULL) {
1785                                 *avail_spare = B_TRUE;
1786                                 return (ret);
1787                         }
1788                 }
1789         }
1790
1791         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1792             &child, &children) == 0) {
1793                 for (c = 0; c < children; c++) {
1794                         if ((ret = vdev_to_nvlist_iter(child[c], search,
1795                             avail_spare, l2cache, NULL)) != NULL) {
1796                                 *l2cache = B_TRUE;
1797                                 return (ret);
1798                         }
1799                 }
1800         }
1801
1802         return (NULL);
1803 }
1804
1805 /*
1806  * Given a physical path (minus the "/devices" prefix), find the
1807  * associated vdev.
1808  */
1809 nvlist_t *
1810 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1811     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1812 {
1813         nvlist_t *search, *nvroot, *ret;
1814
1815         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1816         verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1817
1818         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1819             &nvroot) == 0);
1820
1821         *avail_spare = B_FALSE;
1822         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1823         nvlist_free(search);
1824
1825         return (ret);
1826 }
1827
1828 /*
1829  * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
1830  */
1831 boolean_t
1832 zpool_vdev_is_interior(const char *name)
1833 {
1834         if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1835             strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1836                 return (B_TRUE);
1837         return (B_FALSE);
1838 }
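
/*
 * For example, top-level names such as "mirror-0" or "raidz1-3" (as shown
 * by 'zpool status') are treated as interior vdevs by the check above,
 * while leaf names such as "c0t0d0" are not.
 */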
1839
1840 nvlist_t *
1841 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1842     boolean_t *l2cache, boolean_t *log)
1843 {
1844         char buf[MAXPATHLEN];
1845         char *end;
1846         nvlist_t *nvroot, *search, *ret;
1847         uint64_t guid;
1848
1849         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1850
1851         guid = strtoull(path, &end, 10);
1852         if (guid != 0 && *end == '\0') {
1853                 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1854         } else if (zpool_vdev_is_interior(path)) {
1855                 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1856         } else if (path[0] != '/') {
1857                 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1858                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1859         } else {
1860                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1861         }
1862
1863         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1864             &nvroot) == 0);
1865
1866         *avail_spare = B_FALSE;
1867         *l2cache = B_FALSE;
1868         if (log != NULL)
1869                 *log = B_FALSE;
1870         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1871         nvlist_free(search);
1872
1873         return (ret);
1874 }
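
/*
 * Illustrative sketch (the open pool handle 'zhp' and the device name
 * "c0t0d0" are assumptions): looking a device up by its short name and
 * checking which class of vdev it turned out to be.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c0t0d0", &spare, &l2cache,
 *	    &log)) == NULL)
 *		(void) fprintf(stderr, "no such device\n");
 *	else if (spare)
 *		(void) printf("device is an available hot spare\n");
 *	else if (l2cache)
 *		(void) printf("device is a cache device\n");
 *	else if (log)
 *		(void) printf("device belongs to a log vdev\n");
 */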
1875
1876 static int
1877 vdev_online(nvlist_t *nv)
1878 {
1879         uint64_t ival;
1880
1881         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1882             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1883             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1884                 return (0);
1885
1886         return (1);
1887 }
1888
1889 /*
1890  * Helper function for zpool_get_physpath().
1891  */
1892 static int
1893 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1894     size_t *bytes_written)
1895 {
1896         size_t bytes_left, pos, rsz;
1897         char *tmppath;
1898         const char *format;
1899
1900         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1901             &tmppath) != 0)
1902                 return (EZFS_NODEVICE);
1903
1904         pos = *bytes_written;
1905         bytes_left = physpath_size - pos;
1906         format = (pos == 0) ? "%s" : " %s";
1907
1908         rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1909         *bytes_written += rsz;
1910
1911         if (rsz >= bytes_left) {
1912                 /* if physpath was not copied properly, clear it */
1913                 if (bytes_left != 0) {
1914                         physpath[pos] = 0;
1915                 }
1916                 return (EZFS_NOSPC);
1917         }
1918         return (0);
1919 }
1920
1921 static int
1922 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1923     size_t *rsz, boolean_t is_spare)
1924 {
1925         char *type;
1926         int ret;
1927
1928         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1929                 return (EZFS_INVALCONFIG);
1930
1931         if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1932                 /*
1933                  * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1934                  * For a spare vdev, we only want to boot from the active
1935                  * spare device.
1936                  */
1937                 if (is_spare) {
1938                         uint64_t spare = 0;
1939                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1940                             &spare);
1941                         if (!spare)
1942                                 return (EZFS_INVALCONFIG);
1943                 }
1944
1945                 if (vdev_online(nv)) {
1946                         if ((ret = vdev_get_one_physpath(nv, physpath,
1947                             phypath_size, rsz)) != 0)
1948                                 return (ret);
1949                 }
1950         } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1951             strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1952             (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1953                 nvlist_t **child;
1954                 uint_t count;
1955                 int i, ret;
1956
1957                 if (nvlist_lookup_nvlist_array(nv,
1958                     ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
1959                         return (EZFS_INVALCONFIG);
1960
1961                 for (i = 0; i < count; i++) {
1962                         ret = vdev_get_physpaths(child[i], physpath,
1963                             phypath_size, rsz, is_spare);
1964                         if (ret == EZFS_NOSPC)
1965                                 return (ret);
1966                 }
1967         }
1968
1969         return (EZFS_POOL_INVALARG);
1970 }
1971
1972 /*
1973  * Get phys_path for a root pool config.
1974  * Return 0 on success; non-zero on failure.
1975  */
1976 static int
1977 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
1978 {
1979         size_t rsz;
1980         nvlist_t *vdev_root;
1981         nvlist_t **child;
1982         uint_t count;
1983         char *type;
1984
1985         rsz = 0;
1986
1987         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1988             &vdev_root) != 0)
1989                 return (EZFS_INVALCONFIG);
1990
1991         if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
1992             nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1993             &child, &count) != 0)
1994                 return (EZFS_INVALCONFIG);
1995
1996         /*
1997          * A root pool cannot have EFI-labeled disks and can only have
1998          * a single top-level vdev.
1999          */
2000         if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2001             pool_uses_efi(vdev_root))
2002                 return (EZFS_POOL_INVALARG);
2003
2004         (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2005             B_FALSE);
2006
2007         /* No online devices */
2008         if (rsz == 0)
2009                 return (EZFS_NODEVICE);
2010
2011         return (0);
2012 }
2013
2014 /*
2015  * Get phys_path for a root pool
2016  * Return 0 on success; non-zero on failure.
2017  */
2018 int
2019 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2020 {
2021         return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2022             phypath_size));
2023 }
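
/*
 * Illustrative sketch (assumes 'zhp' refers to an already-open root pool):
 * the caller supplies the buffer, and the physical paths of all online
 * devices are returned in it, separated by spaces.
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("boot device paths: %s\n", physpath);
 */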
2024
2025 /*
2026  * If the device has been dynamically expanded, then we need to relabel
2027  * the disk to use the new unallocated space.
2028  */
2029 static int
2030 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2031 {
2032         char path[MAXPATHLEN];
2033         char errbuf[1024];
2034         int fd, error;
2035         int (*_efi_use_whole_disk)(int);
2036
2037         if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2038             "efi_use_whole_disk")) == NULL)
2039                 return (-1);
2040
2041         (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2042
2043         if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2044                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2045                     "relabel '%s': unable to open device"), name);
2046                 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2047         }
2048
2049         /*
2050          * It's possible that we might encounter an error if the device
2051          * does not have any unallocated space left. If so, we simply
2052          * ignore that error and continue on.
2053          */
2054         error = _efi_use_whole_disk(fd);
2055         (void) close(fd);
2056         if (error && error != VT_ENOSPC) {
2057                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2058                     "relabel '%s': unable to read disk capacity"), name);
2059                 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2060         }
2061         return (0);
2062 }
2063
2064 /*
2065  * Bring the specified vdev online.  The 'flags' parameter is a set of the
2066  * ZFS_ONLINE_* flags.
2067  */
2068 int
2069 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2070     vdev_state_t *newstate)
2071 {
2072         zfs_cmd_t zc = { 0 };
2073         char msg[1024];
2074         nvlist_t *tgt;
2075         boolean_t avail_spare, l2cache, islog;
2076         libzfs_handle_t *hdl = zhp->zpool_hdl;
2077
2078         if (flags & ZFS_ONLINE_EXPAND) {
2079                 (void) snprintf(msg, sizeof (msg),
2080                     dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2081         } else {
2082                 (void) snprintf(msg, sizeof (msg),
2083                     dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2084         }
2085
2086         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2087         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2088             &islog)) == NULL)
2089                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2090
2091         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2092
2093         if (avail_spare)
2094                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2095
2096         if (flags & ZFS_ONLINE_EXPAND ||
2097             zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2098                 char *pathname = NULL;
2099                 uint64_t wholedisk = 0;
2100
2101                 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2102                     &wholedisk);
2103                 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2104                     &pathname) == 0);
2105
2106                 /*
2107                  * XXX - L2ARC 1.0 devices can't support expansion.
2108                  */
2109                 if (l2cache) {
2110                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2111                             "cannot expand cache devices"));
2112                         return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2113                 }
2114
2115                 if (wholedisk) {
2116                         pathname += strlen(DISK_ROOT) + 1;
2117                         (void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
2118                 }
2119         }
2120
2121         zc.zc_cookie = VDEV_STATE_ONLINE;
2122         zc.zc_obj = flags;
2123
2124         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2125                 if (errno == EINVAL) {
2126                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2127                             "from this pool into a new one.  Use '%s' "
2128                             "instead"), "zpool detach");
2129                         return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2130                 }
2131                 return (zpool_standard_error(hdl, errno, msg));
2132         }
2133
2134         *newstate = zc.zc_cookie;
2135         return (0);
2136 }
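
/*
 * Illustrative sketch (the device name and pool handle are assumptions):
 * onlining a previously offlined disk and reporting the state it came
 * back in.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c0t0d0", 0, &newstate) == 0 &&
 *	    newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device onlined, but remains degraded\n");
 */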
2137
2138 /*
2139  * Take the specified vdev offline
2140  */
2141 int
2142 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2143 {
2144         zfs_cmd_t zc = { 0 };
2145         char msg[1024];
2146         nvlist_t *tgt;
2147         boolean_t avail_spare, l2cache;
2148         libzfs_handle_t *hdl = zhp->zpool_hdl;
2149
2150         (void) snprintf(msg, sizeof (msg),
2151             dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2152
2153         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2154         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2155             NULL)) == NULL)
2156                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2157
2158         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2159
2160         if (avail_spare)
2161                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2162
2163         zc.zc_cookie = VDEV_STATE_OFFLINE;
2164         zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2165
2166         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2167                 return (0);
2168
2169         switch (errno) {
2170         case EBUSY:
2171
2172                 /*
2173                  * There are no other replicas of this device.
2174                  */
2175                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2176
2177         case EEXIST:
2178                 /*
2179                  * The log device has unplayed logs
2180                  */
2181                 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2182
2183         default:
2184                 return (zpool_standard_error(hdl, errno, msg));
2185         }
2186 }
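
/*
 * Illustrative sketch (the device name is an assumption): temporarily
 * offlining a disk, e.g. before pulling it for maintenance.  A temporary
 * offline is not persistent; the device reverts to its previous state on
 * reboot.
 *
 *	if (zpool_vdev_offline(zhp, "c0t0d0", B_TRUE) != 0)
 *		(void) fprintf(stderr, "offline failed\n");
 */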
2187
2188 /*
2189  * Mark the given vdev faulted.
2190  */
2191 int
2192 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2193 {
2194         zfs_cmd_t zc = { 0 };
2195         char msg[1024];
2196         libzfs_handle_t *hdl = zhp->zpool_hdl;
2197
2198         (void) snprintf(msg, sizeof (msg),
2199             dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2200
2201         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2202         zc.zc_guid = guid;
2203         zc.zc_cookie = VDEV_STATE_FAULTED;
2204         zc.zc_obj = aux;
2205
2206         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2207                 return (0);
2208
2209         switch (errno) {
2210         case EBUSY:
2211
2212                 /*
2213                  * There are no other replicas of this device.
2214                  */
2215                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2216
2217         default:
2218                 return (zpool_standard_error(hdl, errno, msg));
2219         }
2220
2221 }
2222
2223 /*
2224  * Mark the given vdev degraded.
2225  */
2226 int
2227 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2228 {
2229         zfs_cmd_t zc = { 0 };
2230         char msg[1024];
2231         libzfs_handle_t *hdl = zhp->zpool_hdl;
2232
2233         (void) snprintf(msg, sizeof (msg),
2234             dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2235
2236         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2237         zc.zc_guid = guid;
2238         zc.zc_cookie = VDEV_STATE_DEGRADED;
2239         zc.zc_obj = aux;
2240
2241         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2242                 return (0);
2243
2244         return (zpool_standard_error(hdl, errno, msg));
2245 }
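
/*
 * Note that zpool_vdev_fault() and zpool_vdev_degrade() take a vdev GUID
 * rather than a path; they are typically driven by the fault management
 * agents, which identify devices by GUID.  A minimal sketch, assuming
 * 'guid' holds the GUID of a misbehaving leaf vdev:
 *
 *	(void) zpool_vdev_degrade(zhp, guid, VDEV_AUX_EXTERNAL);
 */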
2246
2247 /*
2248  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2249  * a hot spare.
2250  */
2251 static boolean_t
2252 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2253 {
2254         nvlist_t **child;
2255         uint_t c, children;
2256         char *type;
2257
2258         if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2259             &children) == 0) {
2260                 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2261                     &type) == 0);
2262
2263                 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2264                     children == 2 && child[which] == tgt)
2265                         return (B_TRUE);
2266
2267                 for (c = 0; c < children; c++)
2268                         if (is_replacing_spare(child[c], tgt, which))
2269                                 return (B_TRUE);
2270         }
2271
2272         return (B_FALSE);
2273 }
2274
2275 /*
2276  * Attach new_disk (fully described by nvroot) to old_disk.
2277  * If 'replacing' is specified, the new disk will replace the old one.
2278  */
2279 int
2280 zpool_vdev_attach(zpool_handle_t *zhp,
2281     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2282 {
2283         zfs_cmd_t zc = { 0 };
2284         char msg[1024];
2285         int ret;
2286         nvlist_t *tgt;
2287         boolean_t avail_spare, l2cache, islog;
2288         uint64_t val;
2289         char *path, *newname;
2290         nvlist_t **child;
2291         uint_t children;
2292         nvlist_t *config_root;
2293         libzfs_handle_t *hdl = zhp->zpool_hdl;
2294         boolean_t rootpool = pool_is_bootable(zhp);
2295
2296         if (replacing)
2297                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2298                     "cannot replace %s with %s"), old_disk, new_disk);
2299         else
2300                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2301                     "cannot attach %s to %s"), new_disk, old_disk);
2302
2303         /*
2304          * If this is a root pool, make sure that we're not attaching an
2305          * EFI labeled device.
2306          */
2307         if (rootpool && pool_uses_efi(nvroot)) {
2308                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2309                     "EFI labeled devices are not supported on root pools."));
2310                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2311         }
2312
2313         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2314         if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2315             &islog)) == 0)
2316                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2317
2318         if (avail_spare)
2319                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2320
2321         if (l2cache)
2322                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2323
2324         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2325         zc.zc_cookie = replacing;
2326
2327         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2328             &child, &children) != 0 || children != 1) {
2329                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2330                     "new device must be a single disk"));
2331                 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2332         }
2333
2334         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2335             ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2336
2337         if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2338                 return (-1);
2339
2340         /*
2341          * If the target is a hot spare that has been swapped in, we can only
2342          * replace it with another hot spare.
2343          */
2344         if (replacing &&
2345             nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2346             (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2347             NULL) == NULL || !avail_spare) &&
2348             is_replacing_spare(config_root, tgt, 1)) {
2349                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2350                     "can only be replaced by another hot spare"));
2351                 free(newname);
2352                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2353         }
2354
2355         /*
2356          * If we are attempting to replace a spare, it cannot be applied to an
2357          * already spared device.
2358          */
2359         if (replacing &&
2360             nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2361             zpool_find_vdev(zhp, newname, &avail_spare,
2362             &l2cache, NULL) != NULL && avail_spare &&
2363             is_replacing_spare(config_root, tgt, 0)) {
2364                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2365                     "device has already been replaced with a spare"));
2366                 free(newname);
2367                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2368         }
2369
2370         free(newname);
2371
2372         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2373                 return (-1);
2374
2375         ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2376
2377         zcmd_free_nvlists(&zc);
2378
2379         if (ret == 0) {
2380                 if (rootpool) {
2381                         /*
2382                          * XXX - This should be removed once we can
2383                          * automatically install the bootblocks on the
2384                          * newly attached disk.
2385                          */
2386                         (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2387                             "be sure to invoke %s to make '%s' bootable.\n"),
2388                             BOOTCMD, new_disk);
2389
2390                         /*
2391                          * XXX need a better way to prevent user from
2392                          * booting up a half-baked vdev.
2393                          */
2394                         (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2395                             "sure to wait until resilver is done "
2396                             "before rebooting.\n"));
2397                 }
2398                 return (0);
2399         }
2400
2401         switch (errno) {
2402         case ENOTSUP:
2403                 /*
2404                  * Can't attach to or replace this type of vdev.
2405                  */
2406                 if (replacing) {
2407                         if (islog)
2408                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2409                                     "cannot replace a log with a spare"));
2410                         else
2411                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2412                                     "cannot replace a replacing device"));
2413                 } else {
2414                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2415                             "can only attach to mirrors and top-level "
2416                             "disks"));
2417                 }
2418                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2419                 break;
2420
2421         case EINVAL:
2422                 /*
2423                  * The new device must be a single disk.
2424                  */
2425                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2426                     "new device must be a single disk"));
2427                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2428                 break;
2429
2430         case EBUSY:
2431                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2432                     new_disk);
2433                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2434                 break;
2435
2436         case EOVERFLOW:
2437                 /*
2438                  * The new device is too small.
2439                  */
2440                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2441                     "device is too small"));
2442                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2443                 break;
2444
2445         case EDOM:
2446                 /*
2447                  * The new device has a different alignment requirement.
2448                  */
2449                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2450                     "devices have different sector alignment"));
2451                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2452                 break;
2453
2454         case ENAMETOOLONG:
2455                 /*
2456                  * The resulting top-level vdev spec won't fit in the label.
2457                  */
2458                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2459                 break;
2460
2461         default:
2462                 (void) zpool_standard_error(hdl, errno, msg);
2463         }
2464
2465         return (-1);
2466 }
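
/*
 * Illustrative sketch of the 'nvroot' argument expected by
 * zpool_vdev_attach() above: a root vdev with exactly one child disk.  In
 * zpool(1M) this nvlist is normally built (and the disk labeled) by
 * make_root_vdev(); the hand-rolled version below is an assumption-laden
 * sketch that skips labeling and error handling, with device names chosen
 * purely for the example.
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c0t1d0s0") == 0);
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	(void) zpool_vdev_attach(zhp, "c0t0d0s0", "c0t1d0s0", nvroot, 1);
 */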
2467
2468 /*
2469  * Detach the specified device.
2470  */
2471 int
2472 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2473 {
2474         zfs_cmd_t zc = { 0 };
2475         char msg[1024];
2476         nvlist_t *tgt;
2477         boolean_t avail_spare, l2cache;
2478         libzfs_handle_t *hdl = zhp->zpool_hdl;
2479
2480         (void) snprintf(msg, sizeof (msg),
2481             dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2482
2483         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2484         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2485             NULL)) == 0)
2486                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2487
2488         if (avail_spare)
2489                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2490
2491         if (l2cache)
2492                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2493
2494         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2495
2496         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2497                 return (0);
2498
2499         switch (errno) {
2500
2501         case ENOTSUP:
2502                 /*
2503                  * Can't detach from this type of vdev.
2504                  */
2505                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2506                     "applicable to mirror and replacing vdevs"));
2507                 (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2508                 break;
2509
2510         case EBUSY:
2511                 /*
2512                  * There are no other replicas of this device.
2513                  */
2514                 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2515                 break;
2516
2517         default:
2518                 (void) zpool_standard_error(hdl, errno, msg);
2519         }
2520
2521         return (-1);
2522 }
2523
2524 /*
2525  * Find a mirror vdev in the source nvlist.
2526  *
2527  * The mchild array contains a list of disks in one of the top-level mirrors
2528  * of the source pool.  The schild array contains a list of disks that the
2529  * user specified on the command line.  We loop over the mchild array to
2530  * see if any entry in the schild array matches.
2531  *
2532  * If a disk in the mchild array is found in the schild array, we return
2533  * the index of that entry.  Otherwise we return -1.
2534  */
2535 static int
2536 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2537     nvlist_t **schild, uint_t schildren)
2538 {
2539         uint_t mc;
2540
2541         for (mc = 0; mc < mchildren; mc++) {
2542                 uint_t sc;
2543                 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2544                     mchild[mc], B_FALSE);
2545
2546                 for (sc = 0; sc < schildren; sc++) {
2547                         char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2548                             schild[sc], B_FALSE);
2549                         boolean_t result = (strcmp(mpath, spath) == 0);
2550
2551                         free(spath);
2552                         if (result) {
2553                                 free(mpath);
2554                                 return (mc);
2555                         }
2556                 }
2557
2558                 free(mpath);
2559         }
2560
2561         return (-1);
2562 }
2563
2564 /*
2565  * Split a mirror pool.  If '*newroot' is NULL, a new nvlist is
2566  * generated, and it is the caller's responsibility to free it.
2567  */
2568 int
2569 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2570     nvlist_t *props, splitflags_t flags)
2571 {
2572         zfs_cmd_t zc = { 0 };
2573         char msg[1024];
2574         nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2575         nvlist_t **varray = NULL, *zc_props = NULL;
2576         uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2577         libzfs_handle_t *hdl = zhp->zpool_hdl;
2578         uint64_t vers;
2579         boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2580         int retval = 0;
2581
2582         (void) snprintf(msg, sizeof (msg),
2583             dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2584
2585         if (!zpool_name_valid(hdl, B_FALSE, newname))
2586                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2587
2588         if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2589                 (void) fprintf(stderr, gettext("Internal error: unable to "
2590                     "retrieve pool configuration\n"));
2591                 return (-1);
2592         }
2593
2594         verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2595             == 0);
2596         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2597
2598         if (props) {
2599                 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2600                     props, vers, B_TRUE, msg)) == NULL)
2601                         return (-1);
2602         }
2603
2604         if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2605             &children) != 0) {
2606                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2607                     "Source pool is missing vdev tree"));
2608                 if (zc_props)
2609                         nvlist_free(zc_props);
2610                 return (-1);
2611         }
2612
2613         varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2614         vcount = 0;
2615
2616         if (*newroot == NULL ||
2617             nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2618             &newchild, &newchildren) != 0)
2619                 newchildren = 0;
2620
2621         for (c = 0; c < children; c++) {
2622                 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2623                 char *type;
2624                 nvlist_t **mchild, *vdev;
2625                 uint_t mchildren;
2626                 int entry;
2627
2628                 /*
2629                  * Unlike cache & spares, slogs are stored in the
2630                  * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2631                  */
2632                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2633                     &is_log);
2634                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2635                     &is_hole);
2636                 if (is_log || is_hole) {
2637                         /*
2638                          * Create a hole vdev and put it in the config.
2639                          */
2640                         if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2641                                 goto out;
2642                         if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2643                             VDEV_TYPE_HOLE) != 0)
2644                                 goto out;
2645                         if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2646                             1) != 0)
2647                                 goto out;
2648                         if (lastlog == 0)
2649                                 lastlog = vcount;
2650                         varray[vcount++] = vdev;
2651                         continue;
2652                 }
2653                 lastlog = 0;
2654                 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2655                     == 0);
2656                 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2657                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2658                             "Source pool must be composed only of mirrors\n"));
2659                         retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2660                         goto out;
2661                 }
2662
2663                 verify(nvlist_lookup_nvlist_array(child[c],
2664                     ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2665
2666                 /* find or add an entry for this top-level vdev */
2667                 if (newchildren > 0 &&
2668                     (entry = find_vdev_entry(zhp, mchild, mchildren,
2669                     newchild, newchildren)) >= 0) {
2670                         /* We found a disk that the user specified. */
2671                         vdev = mchild[entry];
2672                         ++found;
2673                 } else {
2674                         /* User didn't specify a disk for this vdev. */
2675                         vdev = mchild[mchildren - 1];
2676                 }
2677
2678                 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2679                         goto out;
2680         }
2681
2682         /* did we find every disk the user specified? */
2683         if (found != newchildren) {
2684                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2685                     "include at most one disk from each mirror"));
2686                 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2687                 goto out;
2688         }
2689
2690         /* Prepare the nvlist for populating. */
2691         if (*newroot == NULL) {
2692                 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2693                         goto out;
2694                 freelist = B_TRUE;
2695                 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2696                     VDEV_TYPE_ROOT) != 0)
2697                         goto out;
2698         } else {
2699                 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2700         }
2701
2702         /* Add all the children we found */
2703         if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2704             lastlog == 0 ? vcount : lastlog) != 0)
2705                 goto out;
2706
2707         /*
2708          * If we're just doing a dry run, exit now with success.
2709          */
2710         if (flags.dryrun) {
2711                 memory_err = B_FALSE;
2712                 freelist = B_FALSE;
2713                 goto out;
2714         }
2715
2716         /* now build up the config list & call the ioctl */
2717         if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2718                 goto out;
2719
2720         if (nvlist_add_nvlist(newconfig,
2721             ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2722             nvlist_add_string(newconfig,
2723             ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2724             nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2725                 goto out;
2726
2727         /*
2728          * The new pool is automatically part of the namespace unless we
2729          * explicitly export it.
2730          */
2731         if (!flags.import)
2732                 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2733         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2734         (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2735         if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2736                 goto out;
2737         if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2738                 goto out;
2739
2740         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2741                 retval = zpool_standard_error(hdl, errno, msg);
2742                 goto out;
2743         }
2744
2745         freelist = B_FALSE;
2746         memory_err = B_FALSE;
2747
2748 out:
2749         if (varray != NULL) {
2750                 int v;
2751
2752                 for (v = 0; v < vcount; v++)
2753                         nvlist_free(varray[v]);
2754                 free(varray);
2755         }
2756         zcmd_free_nvlists(&zc);
2757         if (zc_props)
2758                 nvlist_free(zc_props);
2759         if (newconfig)
2760                 nvlist_free(newconfig);
2761         if (freelist) {
2762                 nvlist_free(*newroot);
2763                 *newroot = NULL;
2764         }
2765
2766         if (retval != 0)
2767                 return (retval);
2768
2769         if (memory_err)
2770                 return (no_memory(hdl));
2771
2772         return (0);
2773 }
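
/*
 * Illustrative sketch (pool and new-pool names are assumptions): a dry run
 * of splitting the last disk out of each mirror into a new pool called
 * "tankclone".  With flags.dryrun set, zpool_vdev_split() only builds and
 * returns the would-be configuration in 'newroot'; the caller frees it.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "tankclone", &newroot, NULL, flags) == 0) {
 *		nvlist_print(stdout, newroot);
 *		nvlist_free(newroot);
 *	}
 */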
2774
2775 /*
2776  * Remove the given device.  Currently, this is supported only for hot spares
2777  * and level 2 cache devices.
2778  */
2779 int
2780 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2781 {
2782         zfs_cmd_t zc = { 0 };
2783         char msg[1024];
2784         nvlist_t *tgt;
2785         boolean_t avail_spare, l2cache, islog;
2786         libzfs_handle_t *hdl = zhp->zpool_hdl;
2787         uint64_t version;
2788
2789         (void) snprintf(msg, sizeof (msg),
2790             dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2791
2792         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2793         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2794             &islog)) == 0)
2795                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2796         /*
2797          * XXX - this should just go away.
2798          */
2799         if (!avail_spare && !l2cache && !islog) {
2800                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2801                     "only inactive hot spares, cache, top-level, "
2802                     "or log devices can be removed"));
2803                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2804         }
2805
2806         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2807         if (islog && version < SPA_VERSION_HOLES) {
2808                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2809                     "pool must be upgraded to support log removal"));
2810                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
2811         }
2812
2813         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2814
2815         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2816                 return (0);
2817
2818         return (zpool_standard_error(hdl, errno, msg));
2819 }
2820
2821 /*
2822  * Clear the errors for the pool, or the particular device if specified.
2823  */
2824 int
2825 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2826 {
2827         zfs_cmd_t zc = { 0 };
2828         char msg[1024];
2829         nvlist_t *tgt;
2830         zpool_rewind_policy_t policy;
2831         boolean_t avail_spare, l2cache;
2832         libzfs_handle_t *hdl = zhp->zpool_hdl;
2833         nvlist_t *nvi = NULL;
2834
2835         if (path)
2836                 (void) snprintf(msg, sizeof (msg),
2837                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2838                     path);
2839         else
2840                 (void) snprintf(msg, sizeof (msg),
2841                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2842                     zhp->zpool_name);
2843
2844         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2845         if (path) {
2846                 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2847                     &l2cache, NULL)) == 0)
2848                         return (zfs_error(hdl, EZFS_NODEVICE, msg));
2849
2850                 /*
2851                  * Don't allow error clearing for hot spares.  Do allow
2852                  * error clearing for l2cache devices.
2853                  */
2854                 if (avail_spare)
2855                         return (zfs_error(hdl, EZFS_ISSPARE, msg));
2856
2857                 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2858                     &zc.zc_guid) == 0);
2859         }
2860
2861         zpool_get_rewind_policy(rewindnvl, &policy);
2862         zc.zc_cookie = policy.zrp_request;
2863
2864         if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0)
2865                 return (-1);
2866
2867         if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0)
2868                 return (-1);
2869
2870         if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 ||
2871             ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2872             errno != EPERM && errno != EACCES)) {
2873                 if (policy.zrp_request &
2874                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2875                         (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2876                         zpool_rewind_exclaim(hdl, zc.zc_name,
2877                             ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2878                             nvi);
2879                         nvlist_free(nvi);
2880                 }
2881                 zcmd_free_nvlists(&zc);
2882                 return (0);
2883         }
2884
2885         zcmd_free_nvlists(&zc);
2886         return (zpool_standard_error(hdl, errno, msg));
2887 }
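
/*
 * Illustrative sketch (the device name is an assumption, and the rewind
 * policy key/encoding are assumed to mirror what zpool(1M) passes in):
 * clearing the error counts of a single device without requesting any
 * rewind.
 *
 *	nvlist_t *policy = NULL;
 *
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
 *	    ZPOOL_NO_REWIND) == 0);
 *	(void) zpool_clear(zhp, "c0t0d0", policy);
 *	nvlist_free(policy);
 */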
2888
2889 /*
2890  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2891  */
2892 int
2893 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2894 {
2895         zfs_cmd_t zc = { 0 };
2896         char msg[1024];
2897         libzfs_handle_t *hdl = zhp->zpool_hdl;
2898
2899         (void) snprintf(msg, sizeof (msg),
2900             dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2901             guid);
2902
2903         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2904         zc.zc_guid = guid;
2905         zc.zc_cookie = ZPOOL_NO_REWIND;
2906
2907         if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2908                 return (0);
2909
2910         return (zpool_standard_error(hdl, errno, msg));
2911 }
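
/*
 * Illustrative sketch (not part of libzfs) of clearing errors on a single
 * device, where 'nv' is assumed to be the faulted vdev's config nvlist and
 * 'zhp' an open pool handle:
 *
 *	uint64_t guid;
 *
 *	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
 *	(void) zpool_vdev_clear(zhp, guid);
 *
 * zpool_clear() above is the path-based variant used by 'zpool clear'; it
 * additionally accepts a rewind-policy nvlist as built by zpool(1M).
 */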
2912
2913 /*
2914  * Convert from a devid string to a path.
2915  */
2916 static char *
2917 devid_to_path(char *devid_str)
2918 {
2919         ddi_devid_t devid;
2920         char *minor;
2921         char *path;
2922         devid_nmlist_t *list = NULL;
2923         int ret;
2924
2925         if (devid_str_decode(devid_str, &devid, &minor) != 0)
2926                 return (NULL);
2927
2928         ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2929
2930         devid_str_free(minor);
2931         devid_free(devid);
2932
2933         if (ret != 0)
2934                 return (NULL);
2935
2936         if ((path = strdup(list[0].devname)) == NULL) {
2937                 devid_free_nmlist(list);
2938                 return (NULL);
2939         }
2940         devid_free_nmlist(list);
2941         return (path);
2942 }
2943
2944 /*
2945  * Convert from a path to a devid string.
2946  */
2947 static char *
2948 path_to_devid(const char *path)
2949 {
2950         int fd;
2951         ddi_devid_t devid;
2952         char *minor, *ret;
2953
2954         if ((fd = open(path, O_RDONLY)) < 0)
2955                 return (NULL);
2956
2957         minor = NULL;
2958         ret = NULL;
2959         if (devid_get(fd, &devid) == 0) {
2960                 if (devid_get_minor_name(fd, &minor) == 0)
2961                         ret = devid_str_encode(devid, minor);
2962                 if (minor != NULL)
2963                         devid_str_free(minor);
2964                 devid_free(devid);
2965         }
2966         (void) close(fd);
2967
2968         return (ret);
2969 }
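
/*
 * Illustrative round trip through the two helpers above ("c0t0d0s0" is a
 * hypothetical device path):
 *
 *	char *devid, *path;
 *
 *	if ((devid = path_to_devid("/dev/dsk/c0t0d0s0")) != NULL) {
 *		if ((path = devid_to_path(devid)) != NULL) {
 *			... 'path' names the device's current location ...
 *			free(path);
 *		}
 *		devid_str_free(devid);
 *	}
 */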
2970
2971 /*
2972  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2973  * ignore any failure here, since a common case is for an unprivileged user to
2974  * type 'zpool status', and we'll display the correct information anyway.
2975  */
2976 static void
2977 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2978 {
2979         zfs_cmd_t zc = { 0 };
2980
2981         (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2982         (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2983         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2984             &zc.zc_guid) == 0);
2985
2986         (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2987 }
2988
2989 /*
2990  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2991  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2992  * We also check if this is a whole disk, in which case we strip off the
2993  * trailing 's0' slice name.
2994  *
2995  * This routine is also responsible for identifying when disks have been
2996  * reconfigured in a new location.  The kernel will have opened the device by
2997  * devid, but the path will still refer to the old location.  To catch this, we
2998  * first do a path -> devid translation (which is fast for the common case).  If
2999  * the devid matches, we're done.  If not, we do a reverse devid -> path
3000  * translation and issue the appropriate ioctl() to update the path of the vdev.
3001  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3002  * of these checks.
3003  */
3004 char *
3005 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3006     boolean_t verbose)
3007 {
3008         char *path, *devid;
3009         uint64_t value;
3010         char buf[64];
3011         vdev_stat_t *vs;
3012         uint_t vsc;
3013
3014         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3015             &value) == 0) {
3016                 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3017                     &value) == 0);
3018                 (void) snprintf(buf, sizeof (buf), "%llu",
3019                     (u_longlong_t)value);
3020                 path = buf;
3021         } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3022
3023                 /*
3024                  * If the device is dead (faulted, offline, etc) then don't
3025                  * bother opening it.  Otherwise we may be forcing the user to
3026                  * open a misbehaving device, which can have undesirable
3027                  * effects.
3028                  */
3029                 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3030                     (uint64_t **)&vs, &vsc) != 0 ||
3031                     vs->vs_state >= VDEV_STATE_DEGRADED) &&
3032                     zhp != NULL &&
3033                     nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3034                         /*
3035                          * Determine if the current path is correct.
3036                          */
3037                         char *newdevid = path_to_devid(path);
3038
3039                         if (newdevid == NULL ||
3040                             strcmp(devid, newdevid) != 0) {
3041                                 char *newpath;
3042
3043                                 if ((newpath = devid_to_path(devid)) != NULL) {
3044                                         /*
3045                                          * Update the path appropriately.
3046                                          */
3047                                         set_path(zhp, nv, newpath);
3048                                         if (nvlist_add_string(nv,
3049                                             ZPOOL_CONFIG_PATH, newpath) == 0)
3050                                                 verify(nvlist_lookup_string(nv,
3051                                                     ZPOOL_CONFIG_PATH,
3052                                                     &path) == 0);
3053                                         free(newpath);
3054                                 }
3055                         }
3056
3057                         if (newdevid)
3058                                 devid_str_free(newdevid);
3059                 }
3060
3061                 if (strncmp(path, "/dev/dsk/", 9) == 0)
3062                         path += 9;
3063
3064                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3065                     &value) == 0 && value) {
3066                         int pathlen = strlen(path);
3067                         char *tmp = zfs_strdup(hdl, path);
3068
3069                         /*
3070                          * If it starts with c#, and ends with "s0", chop
3071                          * the "s0" off, or if it ends with "s0/old", remove
3072                          * the "s0" from the middle.
3073                          */
3074                         if (CTD_CHECK(tmp)) {
3075                                 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3076                                         tmp[pathlen - 2] = '\0';
3077                                 } else if (pathlen > 6 &&
3078                                     strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3079                                         (void) strcpy(&tmp[pathlen - 6],
3080                                             "/old");
3081                                 }
3082                         }
3083                         return (tmp);
3084                 }
3085         } else {
3086                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3087
3088                 /*
3089                  * If it's a raidz device, we need to stick in the parity level.
3090                  */
3091                 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3092                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3093                             &value) == 0);
3094                         (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3095                             (u_longlong_t)value);
3096                         path = buf;
3097                 }
3098
3099                 /*
3100                  * We identify each top-level vdev by using a <type-id>
3101                  * naming convention.
3102                  */
3103                 if (verbose) {
3104                         uint64_t id;
3105
3106                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3107                             &id) == 0);
3108                         (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3109                             (u_longlong_t)id);
3110                         path = buf;
3111                 }
3112         }
3113
3114         return (zfs_strdup(hdl, path));
3115 }
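
/*
 * Illustrative use of zpool_vdev_name() (not part of libzfs), where 'nv'
 * is assumed to be one ZPOOL_CONFIG_CHILDREN entry of the pool's vdev
 * tree:
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv, B_FALSE);
 *
 *	(void) printf("%s\n", name);	... e.g. "c0t0d0" for a whole disk,
 *					    or "raidz1" for a raidz vdev ...
 *	free(name);
 */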
3116
3117 static int
3118 zbookmark_compare(const void *a, const void *b)
3119 {
3120         return (memcmp(a, b, sizeof (zbookmark_t)));
3121 }
3122
3123 /*
3124  * Retrieve the persistent error log, uniquify the members, and return to the
3125  * caller.
3126  */
3127 int
3128 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3129 {
3130         zfs_cmd_t zc = { 0 };
3131         uint64_t count;
3132         zbookmark_t *zb = NULL;
3133         int i;
3134
3135         /*
3136          * Retrieve the raw error list from the kernel.  If the number of errors
3137          * has increased, allocate more space and continue until we get the
3138          * entire list.
3139          */
3140         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3141             &count) == 0);
3142         if (count == 0)
3143                 return (0);
3144         if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3145             count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3146                 return (-1);
3147         zc.zc_nvlist_dst_size = count;
3148         (void) strcpy(zc.zc_name, zhp->zpool_name);
3149         for (;;) {
3150                 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3151                     &zc) != 0) {
3152                         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3153                         if (errno == ENOMEM) {
3154                                 count = zc.zc_nvlist_dst_size;
3155                                 if ((zc.zc_nvlist_dst = (uintptr_t)
3156                                     zfs_alloc(zhp->zpool_hdl, count *
3157                                     sizeof (zbookmark_t))) == (uintptr_t)NULL)
3158                                         return (-1);
3159                         } else {
3160                                 return (-1);
3161                         }
3162                 } else {
3163                         break;
3164                 }
3165         }
3166
3167         /*
3168          * Sort the resulting bookmarks.  This is a little confusing due to the
3169          * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
3170  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3171  * _not_ copied as part of the process.  So we point the start of our
3172  * array appropriately and decrement the total number of elements.
3173          */
3174         zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3175             zc.zc_nvlist_dst_size;
3176         count -= zc.zc_nvlist_dst_size;
3177
3178         qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3179
3180         verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3181
3182         /*
3183          * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3184          * Fill in the nverrlistp with nvlists of dataset and object numbers.
3185         for (i = 0; i < count; i++) {
3186                 nvlist_t *nv;
3187
3188                 /* ignoring zb_blkid and zb_level for now */
3189                 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3190                     zb[i-1].zb_object == zb[i].zb_object)
3191                         continue;
3192
3193                 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3194                         goto nomem;
3195                 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3196                     zb[i].zb_objset) != 0) {
3197                         nvlist_free(nv);
3198                         goto nomem;
3199                 }
3200                 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3201                     zb[i].zb_object) != 0) {
3202                         nvlist_free(nv);
3203                         goto nomem;
3204                 }
3205                 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3206                         nvlist_free(nv);
3207                         goto nomem;
3208                 }
3209                 nvlist_free(nv);
3210         }
3211
3212         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3213         return (0);
3214
3215 nomem:
3216         free((void *)(uintptr_t)zc.zc_nvlist_dst);
3217         return (no_memory(zhp->zpool_hdl));
3218 }
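
/*
 * Illustrative sketch of how a consumer such as zpool(1M) might walk the
 * error log returned above (error handling elided; 'zhp' is an open pool
 * handle):
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	uint64_t dsobj, obj;
 *	char pathname[MAXPATHLEN * 2];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
 *		return;
 *
 *	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *		nvlist_t *nv;
 *
 *		verify(nvpair_value_nvlist(elem, &nv) == 0);
 *		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *		    &dsobj) == 0);
 *		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *		    &obj) == 0);
 *		zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *		    sizeof (pathname));
 *		... 'pathname' names the damaged file or object ...
 *	}
 *	nvlist_free(nverrlist);
 *
 * Note that when the pool reports zero errors the nvlist is never
 * allocated, hence the NULL check above.
 */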
3219
3220 /*
3221  * Upgrade a ZFS pool to the latest on-disk version.
3222  */
3223 int
3224 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3225 {
3226         zfs_cmd_t zc = { 0 };
3227         libzfs_handle_t *hdl = zhp->zpool_hdl;
3228
3229         (void) strcpy(zc.zc_name, zhp->zpool_name);
3230         zc.zc_cookie = new_version;
3231
3232         if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3233                 return (zpool_standard_error_fmt(hdl, errno,
3234                     dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3235                     zhp->zpool_name));
3236         return (0);
3237 }
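
/*
 * Illustrative sketch (not part of libzfs): upgrade only when the pool is
 * behind the version supported by the running bits.  ZPOOL_PROP_VERSION
 * and SPA_VERSION come from the ZFS headers, not from this file:
 *
 *	uint64_t version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
 *
 *	if (version < SPA_VERSION)
 *		(void) zpool_upgrade(zhp, SPA_VERSION);
 */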
3238
3239 void
3240 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3241     char *history_str)
3242 {
3243         int i;
3244
3245         (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3246         for (i = 1; i < argc; i++) {
3247                 if (strlen(history_str) + 1 + strlen(argv[i]) >
3248                     HIS_MAX_RECORD_LEN)
3249                         break;
3250                 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3251                 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3252         }
3253 }
3254
3255 /*
3256  * Stage command history for logging.
3257  */
3258 int
3259 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3260 {
3261         if (history_str == NULL)
3262                 return (EINVAL);
3263
3264         if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3265                 return (EINVAL);
3266
3267         if (hdl->libzfs_log_str != NULL)
3268                 free(hdl->libzfs_log_str);
3269
3270         if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3271                 return (no_memory(hdl));
3272
3273         return (0);
3274 }
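
/*
 * Illustrative sketch of the intended calling pattern, as done once by
 * zpool(1M) at startup (the argv contents are hypothetical):
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *	char *argv[] = { "zpool", "create", "tank", "c0t0d0" };
 *
 *	zpool_set_history_str("zpool", 4, argv, history_str);
 *	verify(zpool_stage_history(hdl, history_str) == 0);
 *
 * The staged string ("zpool create tank c0t0d0") is recorded in the pool
 * history by the next pool-modifying ioctl.
 */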
3275
3276 /*
3277  * Perform ioctl to get some command history of a pool.
3278  *
3279  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3280  * logical offset of the history buffer to start reading from.
3281  *
3282  * Upon return, 'off' is the next logical offset to read from and
3283  * 'len' is the actual amount of bytes read into 'buf'.
3284  */
3285  * 'len' is the actual number of bytes read into 'buf'.
3286 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3287 {
3288         zfs_cmd_t zc = { 0 };
3289         libzfs_handle_t *hdl = zhp->zpool_hdl;
3290
3291         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3292
3293         zc.zc_history = (uint64_t)(uintptr_t)buf;
3294         zc.zc_history_len = *len;
3295         zc.zc_history_offset = *off;
3296
3297         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3298                 switch (errno) {
3299                 case EPERM:
3300                         return (zfs_error_fmt(hdl, EZFS_PERM,
3301                             dgettext(TEXT_DOMAIN,
3302                             "cannot show history for pool '%s'"),
3303                             zhp->zpool_name));
3304                 case ENOENT:
3305                         return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3306                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
3307                             "'%s'"), zhp->zpool_name));
3308                 case ENOTSUP:
3309                         return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3310                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
3311                             "'%s', pool must be upgraded"), zhp->zpool_name));
3312                 default:
3313                         return (zpool_standard_error_fmt(hdl, errno,
3314                             dgettext(TEXT_DOMAIN,
3315                             "cannot get history for '%s'"), zhp->zpool_name));
3316                 }
3317         }
3318
3319         *len = zc.zc_history_len;
3320         *off = zc.zc_history_offset;
3321
3322         return (0);
3323 }
3324
3325 /*
3326  * Process the buffer of nvlists, unpacking and storing each nvlist record
3327  * into 'records'.  'leftover' is set to the number of bytes that weren't
3328  * processed as there wasn't a complete record.
3329  */
3330 int
3331 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3332     nvlist_t ***records, uint_t *numrecords)
3333 {
3334         uint64_t reclen;
3335         nvlist_t *nv;
3336         int i;
3337
3338         while (bytes_read > sizeof (reclen)) {
3339
3340                 /* get length of packed record (stored as little endian) */
3341                 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3342                         reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3343
3344                 if (bytes_read < sizeof (reclen) + reclen)
3345                         break;
3346
3347                 /* unpack record */
3348                 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3349                         return (ENOMEM);
3350                 bytes_read -= sizeof (reclen) + reclen;
3351                 buf += sizeof (reclen) + reclen;
3352
3353                 /* add record to nvlist array */
3354                 (*numrecords)++;
3355                 if (ISP2(*numrecords + 1)) {
3356                         *records = realloc(*records,
3357                             *numrecords * 2 * sizeof (nvlist_t *));
3358                 }
3359                 (*records)[*numrecords - 1] = nv;
3360         }
3361
3362         *leftover = bytes_read;
3363         return (0);
3364 }
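
/*
 * For reference, the record framing decoded above (byte values are
 * illustrative): each record is a 64-bit length stored little endian,
 * immediately followed by that many bytes of packed nvlist data, so a
 * 42-byte record occupies 50 bytes of the buffer:
 *
 *	offset 0:	2a 00 00 00 00 00 00 00		(reclen = 42)
 *	offset 8:	<42 bytes of packed nvlist>
 *
 * A partial record at the tail of the buffer is left unconsumed and its
 * size is reported back through 'leftover'.
 */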
3365
3366 #define HIS_BUF_LEN     (128*1024)
3367
3368 /*
3369  * Retrieve the command history of a pool.
3370  */
3371 int
3372 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3373 {
3374         char buf[HIS_BUF_LEN];
3375         uint64_t off = 0;
3376         nvlist_t **records = NULL;
3377         uint_t numrecords = 0;
3378         int err, i;
3379
3380         do {
3381                 uint64_t bytes_read = sizeof (buf);
3382                 uint64_t leftover;
3383
3384                 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3385                         break;
3386
3387                 /* if nothing else was read in, we're at EOF, just return */
3388                 if (!bytes_read)
3389                         break;
3390
3391                 if ((err = zpool_history_unpack(buf, bytes_read,
3392                     &leftover, &records, &numrecords)) != 0)
3393                         break;
3394                 off -= leftover;
3395
3396                 /* CONSTCOND */
3397         } while (1);
3398
3399         if (!err) {
3400                 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3401                 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3402                     records, numrecords) == 0);
3403         }
3404         for (i = 0; i < numrecords; i++)
3405                 nvlist_free(records[i]);
3406         free(records);
3407
3408         return (err);
3409 }
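
/*
 * Illustrative sketch of consuming the history nvlist (error handling
 * elided; ZPOOL_HIST_CMD and ZPOOL_HIST_TIME are record keys defined in
 * the ZFS headers, not in this file):
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) != 0)
 *		return;
 *	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *	    &records, &numrecords) == 0);
 *	for (i = 0; i < numrecords; i++) {
 *		char *cmd;
 *		uint64_t tsec;
 *
 *		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *		    &cmd) == 0 &&
 *		    nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
 *		    &tsec) == 0)
 *			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
 *	}
 *	nvlist_free(nvhis);
 */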
3410
3411 void
3412 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3413     char *pathname, size_t len)
3414 {
3415         zfs_cmd_t zc = { 0 };
3416         boolean_t mounted = B_FALSE;
3417         char *mntpnt = NULL;
3418         char dsname[MAXNAMELEN];
3419
3420         if (dsobj == 0) {
3421                 /* special case for the MOS */
3422                 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3423                 return;
3424         }
3425
3426         /* get the dataset's name */
3427         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3428         zc.zc_obj = dsobj;
3429         if (ioctl(zhp->zpool_hdl->libzfs_fd,
3430             ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3431                 /* just write out a path of two object numbers */
3432                 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3433                     dsobj, obj);
3434                 return;
3435         }
3436         (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3437
3438         /* find out if the dataset is mounted */
3439         mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3440
3441         /* get the corrupted object's path */
3442         (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3443         zc.zc_obj = obj;
3444         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3445             &zc) == 0) {
3446                 if (mounted) {
3447                         (void) snprintf(pathname, len, "%s%s", mntpnt,
3448                             zc.zc_value);
3449                 } else {
3450                         (void) snprintf(pathname, len, "%s:%s",
3451                             dsname, zc.zc_value);
3452                 }
3453         } else {
3454                 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3455         }
3456         free(mntpnt);
3457 }
3458
3459 /*
3460  * Read the EFI label from the config; if a label does not exist, pass
3461  * the error back to the caller.  If the caller has passed a non-NULL
3462  * diskaddr argument then we set it to the starting address of the EFI
3463  * partition.
3464  */
3465 static int
3466 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3467 {
3468         char *path;
3469         int fd;
3470         char diskname[MAXPATHLEN];
3471         int err = -1;
3472
3473         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3474                 return (err);
3475
3476         (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3477             strrchr(path, '/'));
3478         if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3479                 struct dk_gpt *vtoc;
3480
3481                 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3482                         if (sb != NULL)
3483                                 *sb = vtoc->efi_parts[0].p_start;
3484                         efi_free(vtoc);
3485                 }
3486                 (void) close(fd);
3487         }
3488         return (err);
3489 }
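
/*
 * Illustrative use of read_efi_label() (error handling elided), where
 * 'nv' is assumed to be a leaf vdev's config nvlist:
 *
 *	diskaddr_t start;
 *
 *	if (read_efi_label(nv, &start) >= 0)
 *		... 'start' is the first block of slice 0 ...
 */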
3490
3491 /*
3492  * Determine where a partition starts on a disk in the current
3493  * configuration.
3494  */
3495 static diskaddr_t
3496 find_start_block(nvlist_t *config)
3497 {
3498         nvlist_t **child;
3499         uint_t c, children;
3500         diskaddr_t sb = MAXOFFSET_T;
3501         uint64_t wholedisk;
3502
3503         if (nvlist_lookup_nvlist_array(config,
3504             ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3505                 if (nvlist_lookup_uint64(config,
3506                     ZPOOL_CONFIG_WHOLE_DISK,
3507                     &wholedisk) != 0 || !wholedisk) {
3508                         return (MAXOFFSET_T);
3509                 }
3510                 if (read_efi_label(config, &sb) < 0)
3511                         sb = MAXOFFSET_T;
3512                 return (sb);
3513         }
3514
3515         for (c = 0; c < children; c++) {
3516                 sb = find_start_block(child[c]);
3517                 if (sb != MAXOFFSET_T) {
3518                         return (sb);
3519                 }
3520         }
3521         return (MAXOFFSET_T);
3522 }
3523
3524 /*
3525  * Label an individual disk.  The name provided is the short name,
3526  * stripped of any leading /dev path.
3527  */
3528 int
3529 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3530 {
3531         char path[MAXPATHLEN];
3532         struct dk_gpt *vtoc;
3533         int fd;
3534         size_t resv = EFI_MIN_RESV_SIZE;
3535         uint64_t slice_size;
3536         diskaddr_t start_block;
3537         char errbuf[1024];
3538
3539         /* prepare an error message just in case */
3540         (void) snprintf(errbuf, sizeof (errbuf),
3541             dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3542
3543         if (zhp) {
3544                 nvlist_t *nvroot;
3545
3546                 if (pool_is_bootable(zhp)) {
3547                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3548                             "EFI labeled devices are not supported on root "
3549                             "pools."));
3550                         return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3551                 }
3552
3553                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3554                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3555
3556                 if (zhp->zpool_start_block == 0)
3557                         start_block = find_start_block(nvroot);
3558                 else
3559                         start_block = zhp->zpool_start_block;
3560                 zhp->zpool_start_block = start_block;
3561         } else {
3562                 /* new pool */
3563                 start_block = NEW_START_BLOCK;
3564         }
3565
3566         (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3567             BACKUP_SLICE);
3568
3569         if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3570                 /*
3571                  * This shouldn't happen.  We've long since verified that this
3572                  * is a valid device.
3573                  */
3574                 zfs_error_aux(hdl,
3575                     dgettext(TEXT_DOMAIN, "unable to open device"));
3576                 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3577         }
3578
3579         if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3580                 /*
3581                  * The only way this can fail is if we run out of memory, or we
3582                  * were unable to read the disk's capacity
3583                  */
3584                 if (errno == ENOMEM)
3585                         (void) no_memory(hdl);
3586
3587                 (void) close(fd);
3588                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3589             "unable to read disk capacity"));
3590
3591                 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3592         }
3593
3594         slice_size = vtoc->efi_last_u_lba + 1;
3595         slice_size -= EFI_MIN_RESV_SIZE;
3596         if (start_block == MAXOFFSET_T)
3597                 start_block = NEW_START_BLOCK;
3598         slice_size -= start_block;
3599
3600         vtoc->efi_parts[0].p_start = start_block;
3601         vtoc->efi_parts[0].p_size = slice_size;
3602
3603         /*
3604          * Why we use V_USR: V_BACKUP confuses users, and is considered
3605          * disposable by some EFI utilities (since EFI doesn't have a backup
3606          * slice).  V_UNASSIGNED is supposed to be used only for zero size
3607          * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3608          * etc. were all pretty specific.  V_USR is as close to reality as we
3609          * can get, in the absence of V_OTHER.
3610          */
3611         vtoc->efi_parts[0].p_tag = V_USR;
3612         (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3613
3614         vtoc->efi_parts[8].p_start = slice_size + start_block;
3615         vtoc->efi_parts[8].p_size = resv;
3616         vtoc->efi_parts[8].p_tag = V_RESERVED;
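
        /*
         * At this point the label describes two partitions.  For example,
         * on a disk whose last usable LBA is 1000000 and with a start
         * block of 256 (the numbers are illustrative only), the layout is:
         *
         *	efi_parts[0]: p_start = 256
         *	              p_size  = 1000001 - EFI_MIN_RESV_SIZE - 256
         *	efi_parts[8]: p_start = 1000001 - EFI_MIN_RESV_SIZE
         *	              p_size  = EFI_MIN_RESV_SIZE
         *
         * i.e. slice 0 covers every usable block except a reserved area of
         * EFI_MIN_RESV_SIZE blocks at the end of the disk (slice 8).
         */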
3617
3618         if (efi_write(fd, vtoc) != 0) {
3619                 /*
3620                  * Some block drivers (like pcata) may not support EFI
3621                  * GPT labels.  Print out a helpful error message
3622                  * directing the user to manually label the disk and
3623                  * give a specific slice.
3624                  */
3625                 (void) close(fd);
3626                 efi_free(vtoc);
3627
3628                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3629                     "try using fdisk(1M) and then provide a specific slice"));
3630                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3631         }
3632
3633         (void) close(fd);
3634         efi_free(vtoc);
3635         return (0);
3636 }
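
/*
 * Illustrative use of zpool_label_disk() ("c1t2d0" is a hypothetical short
 * disk name, as passed by zpool(1M) when adding a whole disk):
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t2d0") != 0)
 *		... the disk could not be given an EFI label ...
 *
 * On success the disk carries an EFI label whose slice 0 ("c1t2d0s0") is
 * available for use by the pool.
 */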
3637
3638 static boolean_t
3639 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3640 {
3641         char *type;
3642         nvlist_t **child;
3643         uint_t children, c;
3644
3645         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3646         if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3647             strcmp(type, VDEV_TYPE_FILE) == 0 ||
3648             strcmp(type, VDEV_TYPE_LOG) == 0 ||
3649             strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3650             strcmp(type, VDEV_TYPE_MISSING) == 0) {
3651                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3652                     "vdev type '%s' is not supported"), type);
3653                 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3654                 return (B_FALSE);
3655         }
3656         if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3657             &child, &children) == 0) {
3658                 for (c = 0; c < children; c++) {
3659                         if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3660                                 return (B_FALSE);
3661                 }
3662         }
3663         return (B_TRUE);
3664 }
3665
3666 /*
3667  * Check whether this zvol may be used as a dump device: return zero if
3668  * it may, > 0 if it may not, and < 0 if the argument is not a zvol.
3669  */
3670 int
3671 zvol_check_dump_config(char *arg)
3672 {
3673         zpool_handle_t *zhp = NULL;
3674         nvlist_t *config, *nvroot;
3675         char *p, *volname;
3676         nvlist_t **top;
3677         uint_t toplevels;
3678         libzfs_handle_t *hdl;
3679         char errbuf[1024];
3680         char poolname[ZPOOL_MAXNAMELEN];
3681         int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3682         int ret = 1;
3683
3684         if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3685                 return (-1);
3686         }
3687
3688         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3689             "dump is not supported on device '%s'"), arg);
3690
3691         if ((hdl = libzfs_init()) == NULL)
3692                 return (1);
3693         libzfs_print_on_error(hdl, B_TRUE);
3694
3695         volname = arg + pathlen;
3696
3697         /* check the configuration of the pool */
3698         if ((p = strchr(volname, '/')) == NULL) {
3699                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3700                     "malformed dataset name"));
3701                 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3702                 return (1);
3703         } else if (p - volname >= ZFS_MAXNAMELEN) {
3704                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3705                     "dataset name is too long"));
3706                 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3707                 return (1);
3708         } else {
3709                 (void) strncpy(poolname, volname, p - volname);
3710                 poolname[p - volname] = '\0';
3711         }
3712
3713         if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3714                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3715                     "could not open pool '%s'"), poolname);
3716                 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3717                 goto out;
3718         }
3719         config = zpool_get_config(zhp, NULL);
3720         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3721             &nvroot) != 0) {
3722                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3723                     "could not obtain vdev configuration for '%s'"), poolname);
3724                 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3725                 goto out;
3726         }
3727
3728         verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3729             &top, &toplevels) == 0);
3730         if (toplevels != 1) {
3731                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3732                     "'%s' has multiple top level vdevs"), poolname);
3733                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3734                 goto out;
3735         }
3736
3737         if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3738                 goto out;
3739         }
3740         ret = 0;
3741
3742 out:
3743         if (zhp)
3744                 zpool_close(zhp);
3745         libzfs_fini(hdl);
3746         return (ret);
3747 }
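
/*
 * Illustrative use of zvol_check_dump_config() (the path is hypothetical
 * and must begin with ZVOL_FULL_DEV_DIR):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
 *		... the zvol is backed by a single supported top-level
 *		... vdev and may be configured as a dump device ...
 */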