1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26
27 #include <alloca.h>
28 #include <assert.h>
29 #include <ctype.h>
30 #include <errno.h>
31 #include <devid.h>
32 #include <dirent.h>
33 #include <fcntl.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <strings.h>
38 #include <unistd.h>
39 #include <zone.h>
40 #include <sys/efi_partition.h>
41 #include <sys/vtoc.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/zio.h>
45
46 #include "zfs_namecheck.h"
47 #include "zfs_prop.h"
48 #include "libzfs_impl.h"
49
50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
51
52 #if defined(__i386) || defined(__amd64)
53 #define BOOTCMD "installgrub(1M)"
54 #else
55 #define BOOTCMD "installboot(1M)"
56 #endif
57
58 /*
59  * ====================================================================
60  *   zpool property functions
61  * ====================================================================
62  */
63
64 static int
65 zpool_get_all_props(zpool_handle_t *zhp)
66 {
67         zfs_cmd_t zc = { 0 };
68         libzfs_handle_t *hdl = zhp->zpool_hdl;
69
70         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
71
72         if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
73                 return (-1);
74
75         while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
76                 if (errno == ENOMEM) {
77                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
78                                 zcmd_free_nvlists(&zc);
79                                 return (-1);
80                         }
81                 } else {
82                         zcmd_free_nvlists(&zc);
83                         return (-1);
84                 }
85         }
86
87         if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
88                 zcmd_free_nvlists(&zc);
89                 return (-1);
90         }
91
92         zcmd_free_nvlists(&zc);
93
94         return (0);
95 }
96
97 static int
98 zpool_props_refresh(zpool_handle_t *zhp)
99 {
100         nvlist_t *old_props;
101
102         old_props = zhp->zpool_props;
103
104         if (zpool_get_all_props(zhp) != 0)
105                 return (-1);
106
107         nvlist_free(old_props);
108         return (0);
109 }
110
111 static char *
112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
113     zprop_source_t *src)
114 {
115         nvlist_t *nv, *nvl;
116         uint64_t ival;
117         char *value;
118         zprop_source_t source;
119
120         nvl = zhp->zpool_props;
121         if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
122                 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
123                 source = ival;
124                 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
125         } else {
126                 source = ZPROP_SRC_DEFAULT;
127                 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
128                         value = "-";
129         }
130
131         if (src)
132                 *src = source;
133
134         return (value);
135 }
136
137 uint64_t
138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
139 {
140         nvlist_t *nv, *nvl;
141         uint64_t value;
142         zprop_source_t source;
143
144         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
145                 /*
146                  * zpool_get_all_props() has most likely failed because
147                  * the pool is faulted, but if all we need is the top level
148                  * vdev's guid then get it from the zhp config nvlist.
149                  */
150                 if ((prop == ZPOOL_PROP_GUID) &&
151                     (nvlist_lookup_nvlist(zhp->zpool_config,
152                     ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
153                     (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
154                     == 0)) {
155                         return (value);
156                 }
157                 return (zpool_prop_default_numeric(prop));
158         }
159
160         nvl = zhp->zpool_props;
161         if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
162                 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
163                 source = value;
164                 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
165         } else {
166                 source = ZPROP_SRC_DEFAULT;
167                 value = zpool_prop_default_numeric(prop);
168         }
169
170         if (src)
171                 *src = source;
172
173         return (value);
174 }
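/*
 * Illustrative sketch (not part of the original source): reading a numeric
 * pool property with zpool_get_prop_int().  The handle is assumed to have
 * been obtained from zpool_open(); error handling is elided.
 */
#if 0
static void
example_print_version(zpool_handle_t *zhp)
{
        zprop_source_t src;
        uint64_t vers;

        /* Falls back to the default value if the pool is faulted. */
        vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
        (void) printf("%s: version %llu (%s)\n", zpool_get_name(zhp),
            (u_longlong_t)vers,
            src == ZPROP_SRC_DEFAULT ? "default" : "local");
}
#endif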
175
176 /*
177  * Map VDEV STATE to printed strings.
178  */
179 char *
180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
181 {
182         switch (state) {
183         case VDEV_STATE_CLOSED:
184         case VDEV_STATE_OFFLINE:
185                 return (gettext("OFFLINE"));
186         case VDEV_STATE_REMOVED:
187                 return (gettext("REMOVED"));
188         case VDEV_STATE_CANT_OPEN:
189                 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
190                         return (gettext("FAULTED"));
191                 else
192                         return (gettext("UNAVAIL"));
193         case VDEV_STATE_FAULTED:
194                 return (gettext("FAULTED"));
195         case VDEV_STATE_DEGRADED:
196                 return (gettext("DEGRADED"));
197         case VDEV_STATE_HEALTHY:
198                 return (gettext("ONLINE"));
199         }
200
201         return (gettext("UNKNOWN"));
202 }
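/*
 * Illustrative sketch (not part of the original source): mapping a vdev's
 * state/aux pair to its display string, e.g. for a status line.  The
 * vdev_stat_t is assumed to come from a ZPOOL_CONFIG_STATS lookup.
 */
#if 0
static void
example_print_state(vdev_stat_t *vs)
{
        (void) printf("state: %s\n",
            zpool_state_to_name(vs->vs_state, vs->vs_aux));
}
#endif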
203
204 /*
205  * Get a zpool property value for 'prop' and return the value in
206  * a pre-allocated buffer.
207  */
208 int
209 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
210     zprop_source_t *srctype)
211 {
212         uint64_t intval;
213         const char *strval;
214         zprop_source_t src = ZPROP_SRC_NONE;
215         nvlist_t *nvroot;
216         vdev_stat_t *vs;
217         uint_t vsc;
218
219         if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
220                 switch (prop) {
221                 case ZPOOL_PROP_NAME:
222                         (void) strlcpy(buf, zpool_get_name(zhp), len);
223                         break;
224
225                 case ZPOOL_PROP_HEALTH:
226                         (void) strlcpy(buf, "FAULTED", len);
227                         break;
228
229                 case ZPOOL_PROP_GUID:
230                         intval = zpool_get_prop_int(zhp, prop, &src);
231                         (void) snprintf(buf, len, "%llu", intval);
232                         break;
233
234                 case ZPOOL_PROP_ALTROOT:
235                 case ZPOOL_PROP_CACHEFILE:
236                         if (zhp->zpool_props != NULL ||
237                             zpool_get_all_props(zhp) == 0) {
238                                 (void) strlcpy(buf,
239                                     zpool_get_prop_string(zhp, prop, &src),
240                                     len);
241                                 if (srctype != NULL)
242                                         *srctype = src;
243                                 return (0);
244                         }
245                         /* FALLTHROUGH */
246                 default:
247                         (void) strlcpy(buf, "-", len);
248                         break;
249                 }
250
251                 if (srctype != NULL)
252                         *srctype = src;
253                 return (0);
254         }
255
256         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
257             prop != ZPOOL_PROP_NAME)
258                 return (-1);
259
260         switch (zpool_prop_get_type(prop)) {
261         case PROP_TYPE_STRING:
262                 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
263                     len);
264                 break;
265
266         case PROP_TYPE_NUMBER:
267                 intval = zpool_get_prop_int(zhp, prop, &src);
268
269                 switch (prop) {
270                 case ZPOOL_PROP_SIZE:
271                 case ZPOOL_PROP_USED:
272                 case ZPOOL_PROP_AVAILABLE:
273                         (void) zfs_nicenum(intval, buf, len);
274                         break;
275
276                 case ZPOOL_PROP_CAPACITY:
277                         (void) snprintf(buf, len, "%llu%%",
278                             (u_longlong_t)intval);
279                         break;
280
281                 case ZPOOL_PROP_HEALTH:
282                         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
283                             ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
284                         verify(nvlist_lookup_uint64_array(nvroot,
285                             ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
286
287                         (void) strlcpy(buf, zpool_state_to_name(intval,
288                             vs->vs_aux), len);
289                         break;
290                 default:
291                         (void) snprintf(buf, len, "%llu", intval);
292                 }
293                 break;
294
295         case PROP_TYPE_INDEX:
296                 intval = zpool_get_prop_int(zhp, prop, &src);
297                 if (zpool_prop_index_to_string(prop, intval, &strval)
298                     != 0)
299                         return (-1);
300                 (void) strlcpy(buf, strval, len);
301                 break;
302
303         default:
304                 abort();
305         }
306
307         if (srctype)
308                 *srctype = src;
309
310         return (0);
311 }
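/*
 * Illustrative sketch (not part of the original source): fetching the
 * formatted "health" property into a caller-supplied buffer.
 */
#if 0
static void
example_print_health(zpool_handle_t *zhp)
{
        char buf[ZFS_MAXPROPLEN];
        zprop_source_t src;

        if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
            sizeof (buf), &src) == 0)
                (void) printf("health: %s\n", buf);
}
#endif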
312
313 /*
314  * Check that the bootfs name lies within the pool it is being set on.
315  * Assumes bootfs is a valid dataset name.
316  */
317 static boolean_t
318 bootfs_name_valid(const char *pool, char *bootfs)
319 {
320         int len = strlen(pool);
321
322         if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
323                 return (B_FALSE);
324
325         if (strncmp(pool, bootfs, len) == 0 &&
326             (bootfs[len] == '/' || bootfs[len] == '\0'))
327                 return (B_TRUE);
328
329         return (B_FALSE);
330 }
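/*
 * Illustrative examples (not part of the original source) of the check
 * above, assuming a pool named "tank":
 *
 *	bootfs_name_valid("tank", "tank")		-> B_TRUE
 *	bootfs_name_valid("tank", "tank/ROOT/be")	-> B_TRUE
 *	bootfs_name_valid("tank", "tankX/fs")		-> B_FALSE
 *	bootfs_name_valid("tank", "other/fs")		-> B_FALSE
 */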
331
332 /*
333  * Inspect the configuration to determine if any of the devices contain
334  * an EFI label.
335  */
336 static boolean_t
337 pool_uses_efi(nvlist_t *config)
338 {
339         nvlist_t **child;
340         uint_t c, children;
341
342         if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
343             &child, &children) != 0)
344                 return (read_efi_label(config, NULL) >= 0);
345
346         for (c = 0; c < children; c++) {
347                 if (pool_uses_efi(child[c]))
348                         return (B_TRUE);
349         }
350         return (B_FALSE);
351 }
352
353 static boolean_t
354 pool_is_bootable(zpool_handle_t *zhp)
355 {
356         char bootfs[ZPOOL_MAXNAMELEN];
357
358         return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
359             sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
360             sizeof (bootfs)) != 0);
361 }
362
363
364 /*
365  * Given an nvlist of zpool properties to be set, validate that they are
366  * correct, and parse any numeric properties (index, boolean, etc) if they are
367  * specified as strings.
368  */
369 static nvlist_t *
370 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
371     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
372 {
373         nvpair_t *elem;
374         nvlist_t *retprops;
375         zpool_prop_t prop;
376         char *strval;
377         uint64_t intval;
378         char *slash;
379         struct stat64 statbuf;
380         zpool_handle_t *zhp;
381         nvlist_t *nvroot;
382
383         if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
384                 (void) no_memory(hdl);
385                 return (NULL);
386         }
387
388         elem = NULL;
389         while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
390                 const char *propname = nvpair_name(elem);
391
392                 /*
393                  * Make sure this property is valid and applies to this type.
394                  */
395                 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
396                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
397                             "invalid property '%s'"), propname);
398                         (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
399                         goto error;
400                 }
401
402                 if (zpool_prop_readonly(prop)) {
403                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
404                             "is readonly"), propname);
405                         (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
406                         goto error;
407                 }
408
409                 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
410                     &strval, &intval, errbuf) != 0)
411                         goto error;
412
413                 /*
414                  * Perform additional checking for specific properties.
415                  */
416                 switch (prop) {
417                 case ZPOOL_PROP_VERSION:
418                         if (intval < version || intval > SPA_VERSION) {
419                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
420                                     "property '%s' number %llu is invalid."),
421                                     propname, (u_longlong_t)intval);
422                                 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
423                                 goto error;
424                         }
425                         break;
426
427                 case ZPOOL_PROP_BOOTFS:
428                         if (create_or_import) {
429                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
430                                     "property '%s' cannot be set at creation "
431                                     "or import time"), propname);
432                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
433                                 goto error;
434                         }
435
436                         if (version < SPA_VERSION_BOOTFS) {
437                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
438                                     "pool must be upgraded to support "
439                                     "'%s' property"), propname);
440                                 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
441                                 goto error;
442                         }
443
444                         /*
445                          * The bootfs property value must be a dataset name,
446                          * and the dataset must reside in the pool it is set on.
447                          */
448                         if (strval[0] != '\0' && !bootfs_name_valid(poolname,
449                             strval)) {
450                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
451                                     "is an invalid name"), strval);
452                                 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
453                                 goto error;
454                         }
455
456                         if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
457                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
458                                     "could not open pool '%s'"), poolname);
459                                 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
460                                 goto error;
461                         }
462                         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
463                             ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
464
465                         /*
466                          * bootfs property cannot be set on a disk which has
467                          * been EFI labeled.
468                          */
469                         if (pool_uses_efi(nvroot)) {
470                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
471                                     "property '%s' not supported on "
472                                     "EFI labeled devices"), propname);
473                                 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
474                                 zpool_close(zhp);
475                                 goto error;
476                         }
477                         zpool_close(zhp);
478                         break;
479
480                 case ZPOOL_PROP_ALTROOT:
481                         if (!create_or_import) {
482                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483                                     "property '%s' can only be set during pool "
484                                     "creation or import"), propname);
485                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486                                 goto error;
487                         }
488
489                         if (strval[0] != '/') {
490                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
491                                     "bad alternate root '%s'"), strval);
492                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
493                                 goto error;
494                         }
495                         break;
496
497                 case ZPOOL_PROP_CACHEFILE:
498                         if (strval[0] == '\0')
499                                 break;
500
501                         if (strcmp(strval, "none") == 0)
502                                 break;
503
504                         if (strval[0] != '/') {
505                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
506                                     "property '%s' must be empty, an "
507                                     "absolute path, or 'none'"), propname);
508                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
509                                 goto error;
510                         }
511
512                         slash = strrchr(strval, '/');
513
514                         if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
515                             strcmp(slash, "/..") == 0) {
516                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
517                                     "'%s' is not a valid file"), strval);
518                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
519                                 goto error;
520                         }
521
522                         *slash = '\0';
523
524                         if (strval[0] != '\0' &&
525                             (stat64(strval, &statbuf) != 0 ||
526                             !S_ISDIR(statbuf.st_mode))) {
527                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
528                                     "'%s' is not a valid directory"),
529                                     strval);
530                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
531                                 goto error;
532                         }
533
534                         *slash = '/';
535                         break;
536                 }
537         }
538
539         return (retprops);
540 error:
541         nvlist_free(retprops);
542         return (NULL);
543 }
544
545 /*
546  * Set zpool property : propname=propval.
547  */
548 int
549 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
550 {
551         zfs_cmd_t zc = { 0 };
552         int ret = -1;
553         char errbuf[1024];
554         nvlist_t *nvl = NULL;
555         nvlist_t *realprops;
556         uint64_t version;
557
558         (void) snprintf(errbuf, sizeof (errbuf),
559             dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
560             zhp->zpool_name);
561
562         if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
563                 return (no_memory(zhp->zpool_hdl));
564
565         if (nvlist_add_string(nvl, propname, propval) != 0) {
566                 nvlist_free(nvl);
567                 return (no_memory(zhp->zpool_hdl));
568         }
569
570         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
571         if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
572             zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
573                 nvlist_free(nvl);
574                 return (-1);
575         }
576
577         nvlist_free(nvl);
578         nvl = realprops;
579
580         /*
581          * Execute the corresponding ioctl() to set this property.
582          */
583         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
584
585         if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
586                 nvlist_free(nvl);
587                 return (-1);
588         }
589
590         ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
591
592         zcmd_free_nvlists(&zc);
593         nvlist_free(nvl);
594
595         if (ret)
596                 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
597         else
598                 (void) zpool_props_refresh(zhp);
599
600         return (ret);
601 }
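/*
 * Illustrative sketch (not part of the original source): setting a pool
 * property by name.  The name/value strings are validated through
 * zpool_valid_proplist() before the ioctl is issued; "autoreplace" is
 * used here simply as a plausible example property.
 */
#if 0
static int
example_enable_autoreplace(zpool_handle_t *zhp)
{
        /* Returns 0 on success, -1 with libzfs error state on failure. */
        return (zpool_set_prop(zhp, "autoreplace", "on"));
}
#endif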
602
603 int
604 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
605 {
606         libzfs_handle_t *hdl = zhp->zpool_hdl;
607         zprop_list_t *entry;
608         char buf[ZFS_MAXPROPLEN];
609
610         if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
611                 return (-1);
612
613         for (entry = *plp; entry != NULL; entry = entry->pl_next) {
614
615                 if (entry->pl_fixed)
616                         continue;
617
618                 if (entry->pl_prop != ZPROP_INVAL &&
619                     zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
620                     NULL) == 0) {
621                         if (strlen(buf) > entry->pl_width)
622                                 entry->pl_width = strlen(buf);
623                 }
624         }
625
626         return (0);
627 }
628
629
630 /*
631  * Validate the given pool name, optionally reporting an extended error
632  * message via 'hdl'.
633  */
634 boolean_t
635 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
636 {
637         namecheck_err_t why;
638         char what;
639         int ret;
640
641         ret = pool_namecheck(pool, &why, &what);
642
643         /*
644          * The rules for reserved pool names were extended at a later point.
645          * But we need to support users with existing pools that may now be
646          * invalid.  So we only check for this expanded set of names during a
647          * create (or import), and only in userland.
648          */
649         if (ret == 0 && !isopen &&
650             (strncmp(pool, "mirror", 6) == 0 ||
651             strncmp(pool, "raidz", 5) == 0 ||
652             strncmp(pool, "spare", 5) == 0 ||
653             strcmp(pool, "log") == 0)) {
654                 if (hdl != NULL)
655                         zfs_error_aux(hdl,
656                             dgettext(TEXT_DOMAIN, "name is reserved"));
657                 return (B_FALSE);
658         }
659
660
661         if (ret != 0) {
662                 if (hdl != NULL) {
663                         switch (why) {
664                         case NAME_ERR_TOOLONG:
665                                 zfs_error_aux(hdl,
666                                     dgettext(TEXT_DOMAIN, "name is too long"));
667                                 break;
668
669                         case NAME_ERR_INVALCHAR:
670                                 zfs_error_aux(hdl,
671                                     dgettext(TEXT_DOMAIN, "invalid character "
672                                     "'%c' in pool name"), what);
673                                 break;
674
675                         case NAME_ERR_NOLETTER:
676                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
677                                     "name must begin with a letter"));
678                                 break;
679
680                         case NAME_ERR_RESERVED:
681                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
682                                     "name is reserved"));
683                                 break;
684
685                         case NAME_ERR_DISKLIKE:
686                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
687                                     "pool name is reserved"));
688                                 break;
689
690                         case NAME_ERR_LEADING_SLASH:
691                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
692                                     "leading slash in name"));
693                                 break;
694
695                         case NAME_ERR_EMPTY_COMPONENT:
696                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
697                                     "empty component in name"));
698                                 break;
699
700                         case NAME_ERR_TRAILING_SLASH:
701                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
702                                     "trailing slash in name"));
703                                 break;
704
705                         case NAME_ERR_MULTIPLE_AT:
706                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
707                                     "multiple '@' delimiters in name"));
708                                 break;
709
710                         }
711                 }
712                 return (B_FALSE);
713         }
714
715         return (B_TRUE);
716 }
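/*
 * Illustrative examples (not part of the original source): with isopen set
 * to B_FALSE (create/import), the expanded set of reserved names is
 * rejected, while an existing pool may still be opened under such a name:
 *
 *	zpool_name_valid(hdl, B_FALSE, "tank")		-> B_TRUE
 *	zpool_name_valid(hdl, B_FALSE, "mirror0")	-> B_FALSE (reserved)
 *	zpool_name_valid(hdl, B_FALSE, "1pool")		-> B_FALSE (no letter)
 *	zpool_name_valid(hdl, B_TRUE, "mirror0")	-> B_TRUE (open only)
 */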
717
718 /*
719  * Open a handle to the given pool, even if the pool is currently in the FAULTED
720  * state.
721  */
722 zpool_handle_t *
723 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
724 {
725         zpool_handle_t *zhp;
726         boolean_t missing;
727
728         /*
729          * Make sure the pool name is valid.
730          */
731         if (!zpool_name_valid(hdl, B_TRUE, pool)) {
732                 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
733                     dgettext(TEXT_DOMAIN, "cannot open '%s'"),
734                     pool);
735                 return (NULL);
736         }
737
738         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
739                 return (NULL);
740
741         zhp->zpool_hdl = hdl;
742         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
743
744         if (zpool_refresh_stats(zhp, &missing) != 0) {
745                 zpool_close(zhp);
746                 return (NULL);
747         }
748
749         if (missing) {
750                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
751                 (void) zfs_error_fmt(hdl, EZFS_NOENT,
752                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
753                 zpool_close(zhp);
754                 return (NULL);
755         }
756
757         return (zhp);
758 }
759
760 /*
761  * Like the above, but silent on error.  Used when iterating over pools (because
762  * the configuration cache may be out of date).
763  */
764 int
765 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
766 {
767         zpool_handle_t *zhp;
768         boolean_t missing;
769
770         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
771                 return (-1);
772
773         zhp->zpool_hdl = hdl;
774         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
775
776         if (zpool_refresh_stats(zhp, &missing) != 0) {
777                 zpool_close(zhp);
778                 return (-1);
779         }
780
781         if (missing) {
782                 zpool_close(zhp);
783                 *ret = NULL;
784                 return (0);
785         }
786
787         *ret = zhp;
788         return (0);
789 }
790
791 /*
792  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
793  * state.
794  */
795 zpool_handle_t *
796 zpool_open(libzfs_handle_t *hdl, const char *pool)
797 {
798         zpool_handle_t *zhp;
799
800         if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
801                 return (NULL);
802
803         if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
804                 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
805                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
806                 zpool_close(zhp);
807                 return (NULL);
808         }
809
810         return (zhp);
811 }
812
813 /*
814  * Close the handle.  Simply frees the memory associated with the handle.
815  */
816 void
817 zpool_close(zpool_handle_t *zhp)
818 {
819         if (zhp->zpool_config)
820                 nvlist_free(zhp->zpool_config);
821         if (zhp->zpool_old_config)
822                 nvlist_free(zhp->zpool_old_config);
823         if (zhp->zpool_props)
824                 nvlist_free(zhp->zpool_props);
825         free(zhp);
826 }
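/*
 * Illustrative sketch (not part of the original source): the typical
 * open/use/close lifecycle for a pool handle.  The pool name "tank" is a
 * placeholder assumption; error reporting is elided.
 */
#if 0
static void
example_handle_lifecycle(void)
{
        libzfs_handle_t *hdl;
        zpool_handle_t *zhp;

        if ((hdl = libzfs_init()) == NULL)
                return;

        /* zpool_open() refuses FAULTED pools; see zpool_open_canfail(). */
        if ((zhp = zpool_open(hdl, "tank")) != NULL) {
                (void) printf("opened %s\n", zpool_get_name(zhp));
                zpool_close(zhp);
        }
        libzfs_fini(hdl);
}
#endif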
827
828 /*
829  * Return the name of the pool.
830  */
831 const char *
832 zpool_get_name(zpool_handle_t *zhp)
833 {
834         return (zhp->zpool_name);
835 }
836
837
838 /*
839  * Return the state of the pool (ACTIVE or UNAVAILABLE).
840  */
841 int
842 zpool_get_state(zpool_handle_t *zhp)
843 {
844         return (zhp->zpool_state);
845 }
846
847 /*
848  * Create the named pool, using the provided vdev list.  It is assumed
849  * that the consumer has already validated the contents of the nvlist, so we
850  * don't have to worry about error semantics.
851  */
852 int
853 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
854     nvlist_t *props, nvlist_t *fsprops)
855 {
856         zfs_cmd_t zc = { 0 };
857         nvlist_t *zc_fsprops = NULL;
858         nvlist_t *zc_props = NULL;
859         char msg[1024];
860         char *altroot;
861         int ret = -1;
862
863         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
864             "cannot create '%s'"), pool);
865
866         if (!zpool_name_valid(hdl, B_FALSE, pool))
867                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
868
869         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
870                 return (-1);
871
872         if (props) {
873                 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
874                     SPA_VERSION_1, B_TRUE, msg)) == NULL) {
875                         goto create_failed;
876                 }
877         }
878
879         if (fsprops) {
880                 uint64_t zoned;
881                 char *zonestr;
882
883                 zoned = ((nvlist_lookup_string(fsprops,
884                     zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
885                     strcmp(zonestr, "on") == 0);
886
887                 if ((zc_fsprops = zfs_valid_proplist(hdl,
888                     ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
889                         goto create_failed;
890                 }
891                 if (!zc_props &&
892                     (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
893                         goto create_failed;
894                 }
895                 if (nvlist_add_nvlist(zc_props,
896                     ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
897                         goto create_failed;
898                 }
899         }
900
901         if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
902                 goto create_failed;
903
904         (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
905
906         if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
907
908                 zcmd_free_nvlists(&zc);
909                 nvlist_free(zc_props);
910                 nvlist_free(zc_fsprops);
911
912                 switch (errno) {
913                 case EBUSY:
914                         /*
915                          * This can happen if the user has specified the same
916                          * device multiple times.  We can't reliably detect this
917                          * until we try to add it and see we already have a
918                          * label.
919                          */
920                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
921                             "one or more vdevs refer to the same device"));
922                         return (zfs_error(hdl, EZFS_BADDEV, msg));
923
924                 case EOVERFLOW:
925                         /*
926                          * This occurs when one of the devices is below
927                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
928                          * device was the problem device since there's no
929                          * reliable way to determine device size from userland.
930                          */
931                         {
932                                 char buf[64];
933
934                                 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
935
936                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
937                                     "one or more devices is less than the "
938                                     "minimum size (%s)"), buf);
939                         }
940                         return (zfs_error(hdl, EZFS_BADDEV, msg));
941
942                 case ENOSPC:
943                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
944                             "one or more devices is out of space"));
945                         return (zfs_error(hdl, EZFS_BADDEV, msg));
946
947                 case ENOTBLK:
948                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
949                             "cache device must be a disk or disk slice"));
950                         return (zfs_error(hdl, EZFS_BADDEV, msg));
951
952                 default:
953                         return (zpool_standard_error(hdl, errno, msg));
954                 }
955         }
956
957         /*
958          * If this is an alternate root pool, then we automatically set the
959          * mountpoint of the root dataset to be '/'.
960          */
961         if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
962             &altroot) == 0) {
963                 zfs_handle_t *zhp;
964
965                 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
966                 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
967                     "/") == 0);
968
969                 zfs_close(zhp);
970         }
971
972 create_failed:
973         zcmd_free_nvlists(&zc);
974         nvlist_free(zc_props);
975         nvlist_free(zc_fsprops);
976         return (ret);
977 }
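/*
 * Illustrative sketch (not part of the original source): hand-building a
 * minimal single-disk vdev tree for zpool_create().  In practice the zpool
 * command constructs this nvlist with its vdev-parsing code; the device
 * path here is a placeholder assumption and error checking is elided.
 */
#if 0
static int
example_create_pool(libzfs_handle_t *hdl)
{
        nvlist_t *nvroot, *disk;
        nvlist_t *children[1];
        int ret;

        verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_DISK) == 0);
        verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
            "/dev/dsk/c1t0d0s0") == 0);
        verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL) == 0);

        verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        children[0] = disk;
        verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            children, 1) == 0);

        ret = zpool_create(hdl, "tank", nvroot, NULL, NULL);
        nvlist_free(disk);
        nvlist_free(nvroot);
        return (ret);
}
#endif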
978
979 /*
980  * Destroy the given pool.  It is up to the caller to ensure that there are no
981  * datasets left in the pool.
982  */
983 int
984 zpool_destroy(zpool_handle_t *zhp)
985 {
986         zfs_cmd_t zc = { 0 };
987         zfs_handle_t *zfp = NULL;
988         libzfs_handle_t *hdl = zhp->zpool_hdl;
989         char msg[1024];
990
991         if (zhp->zpool_state == POOL_STATE_ACTIVE &&
992             (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
993             ZFS_TYPE_FILESYSTEM)) == NULL)
994                 return (-1);
995
996         if (zpool_remove_zvol_links(zhp) != 0)
997                 return (-1);
998
999         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1000
1001         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1002                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1003                     "cannot destroy '%s'"), zhp->zpool_name);
1004
1005                 if (errno == EROFS) {
1006                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1007                             "one or more devices is read only"));
1008                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1009                 } else {
1010                         (void) zpool_standard_error(hdl, errno, msg);
1011                 }
1012
1013                 if (zfp)
1014                         zfs_close(zfp);
1015                 return (-1);
1016         }
1017
1018         if (zfp) {
1019                 remove_mountpoint(zfp);
1020                 zfs_close(zfp);
1021         }
1022
1023         return (0);
1024 }
1025
1026 /*
1027  * Add the given vdevs to the pool.  The caller must have already performed the
1028  * necessary verification to ensure that the vdev specification is well-formed.
1029  */
1030 int
1031 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1032 {
1033         zfs_cmd_t zc = { 0 };
1034         int ret;
1035         libzfs_handle_t *hdl = zhp->zpool_hdl;
1036         char msg[1024];
1037         nvlist_t **spares, **l2cache;
1038         uint_t nspares, nl2cache;
1039
1040         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1041             "cannot add to '%s'"), zhp->zpool_name);
1042
1043         if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1044             SPA_VERSION_SPARES &&
1045             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1046             &spares, &nspares) == 0) {
1047                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1048                     "upgraded to add hot spares"));
1049                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1050         }
1051
1052         if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1053             ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1054                 uint64_t s;
1055
1056                 for (s = 0; s < nspares; s++) {
1057                         char *path;
1058
1059                         if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1060                             &path) == 0 && pool_uses_efi(spares[s])) {
1061                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1062                                     "device '%s' contains an EFI label and "
1063                                     "cannot be used on root pools."),
1064                                     zpool_vdev_name(hdl, NULL, spares[s]));
1065                                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1066                         }
1067                 }
1068         }
1069
1070         if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1071             SPA_VERSION_L2CACHE &&
1072             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1073             &l2cache, &nl2cache) == 0) {
1074                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1075                     "upgraded to add cache devices"));
1076                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1077         }
1078
1079         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1080                 return (-1);
1081         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1082
1083         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1084                 switch (errno) {
1085                 case EBUSY:
1086                         /*
1087                          * This can happen if the user has specified the same
1088                          * device multiple times.  We can't reliably detect this
1089                          * until we try to add it and see we already have a
1090                          * label.
1091                          */
1092                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1093                             "one or more vdevs refer to the same device"));
1094                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1095                         break;
1096
1097                 case EOVERFLOW:
1098                         /*
1099                          * This occurs when one of the devices is below
1100                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1101                          * device was the problem device since there's no
1102                          * reliable way to determine device size from userland.
1103                          */
1104                         {
1105                                 char buf[64];
1106
1107                                 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1108
1109                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1110                                     "device is less than the minimum "
1111                                     "size (%s)"), buf);
1112                         }
1113                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1114                         break;
1115
1116                 case ENOTSUP:
1117                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1118                             "pool must be upgraded to add these vdevs"));
1119                         (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1120                         break;
1121
1122                 case EDOM:
1123                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1124                             "root pool cannot have multiple vdevs"
1125                             " or separate logs"));
1126                         (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1127                         break;
1128
1129                 case ENOTBLK:
1130                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1131                             "cache device must be a disk or disk slice"));
1132                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1133                         break;
1134
1135                 default:
1136                         (void) zpool_standard_error(hdl, errno, msg);
1137                 }
1138
1139                 ret = -1;
1140         } else {
1141                 ret = 0;
1142         }
1143
1144         zcmd_free_nvlists(&zc);
1145
1146         return (ret);
1147 }
1148
1149 /*
1150  * Exports the pool from the system.  The caller must ensure that there are no
1151  * mounted datasets in the pool.
1152  */
1153 int
1154 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1155 {
1156         zfs_cmd_t zc = { 0 };
1157         char msg[1024];
1158
1159         if (zpool_remove_zvol_links(zhp) != 0)
1160                 return (-1);
1161
1162         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1163             "cannot export '%s'"), zhp->zpool_name);
1164
1165         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1166         zc.zc_cookie = force;
1167         zc.zc_guid = hardforce;
1168
1169         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1170                 switch (errno) {
1171                 case EXDEV:
1172                         zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1173                             "use '-f' to override the following errors:\n"
1174                             "'%s' has an active shared spare which could be"
1175                             " used by other pools once '%s' is exported."),
1176                             zhp->zpool_name, zhp->zpool_name);
1177                         return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1178                             msg));
1179                 default:
1180                         return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1181                             msg));
1182                 }
1183         }
1184
1185         return (0);
1186 }
1187
1188 int
1189 zpool_export(zpool_handle_t *zhp, boolean_t force)
1190 {
1191         return (zpool_export_common(zhp, force, B_FALSE));
1192 }
1193
1194 int
1195 zpool_export_force(zpool_handle_t *zhp)
1196 {
1197         return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1198 }
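/*
 * Illustrative sketch (not part of the original source): exporting a pool,
 * retrying with force if an active shared spare (the EXDEV case above)
 * blocks the first attempt.  Whether to force is a policy decision made by
 * the caller.
 */
#if 0
static int
example_export(zpool_handle_t *zhp)
{
        if (zpool_export(zhp, B_FALSE) == 0)
                return (0);
        /* Caller opted in to overriding the shared-spare check. */
        return (zpool_export(zhp, B_TRUE));
}
#endif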
1199
1200 /*
1201  * zpool_import() is a contracted interface and should be kept stable
1202  * if possible.
1203  *
1204  * Applications should use zpool_import_props() to import a pool with
1205  * new property values to be set.
1206  */
1207 int
1208 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1209     char *altroot)
1210 {
1211         nvlist_t *props = NULL;
1212         int ret;
1213
1214         if (altroot != NULL) {
1215                 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1216                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1217                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1218                             newname));
1219                 }
1220
1221                 if (nvlist_add_string(props,
1222                     zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1223                     nvlist_add_string(props,
1224                     zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1225                         nvlist_free(props);
1226                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1227                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1228                             newname));
1229                 }
1230         }
1231
1232         ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1233         if (props)
1234                 nvlist_free(props);
1235         return (ret);
1236 }
1237
1238 /*
1239  * Import the given pool using the known configuration and a list of
1240  * properties to be set. The configuration should have come from
1241  * zpool_find_import(). The 'newname' parameter controls whether the pool
1242  * is imported with a different name.
1243  */
1244 int
1245 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1246     nvlist_t *props, boolean_t importfaulted)
1247 {
1248         zfs_cmd_t zc = { 0 };
1249         char *thename;
1250         char *origname;
1251         int ret;
1252         char errbuf[1024];
1253
1254         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1255             &origname) == 0);
1256
1257         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1258             "cannot import pool '%s'"), origname);
1259
1260         if (newname != NULL) {
1261                 if (!zpool_name_valid(hdl, B_FALSE, newname))
1262                         return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1263                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1264                             newname));
1265                 thename = (char *)newname;
1266         } else {
1267                 thename = origname;
1268         }
1269
1270         if (props) {
1271                 uint64_t version;
1272
1273                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1274                     &version) == 0);
1275
1276                 if ((props = zpool_valid_proplist(hdl, origname,
1277                     props, version, B_TRUE, errbuf)) == NULL) {
1278                         return (-1);
1279                 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1280                         nvlist_free(props);
1281                         return (-1);
1282                 }
1283         }
1284
1285         (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1286
1287         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1288             &zc.zc_guid) == 0);
1289
1290         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1291                 nvlist_free(props);
1292                 return (-1);
1293         }
1294
1295         zc.zc_cookie = (uint64_t)importfaulted;
1296         ret = 0;
1297         if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1298                 char desc[1024];
1299                 if (newname == NULL)
1300                         (void) snprintf(desc, sizeof (desc),
1301                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1302                             thename);
1303                 else
1304                         (void) snprintf(desc, sizeof (desc),
1305                             dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1306                             origname, thename);
1307
1308                 switch (errno) {
1309                 case ENOTSUP:
1310                         /*
1311                          * Unsupported version.
1312                          */
1313                         (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1314                         break;
1315
1316                 case EINVAL:
1317                         (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1318                         break;
1319
1320                 default:
1321                         (void) zpool_standard_error(hdl, errno, desc);
1322                 }
1323
1324                 ret = -1;
1325         } else {
1326                 zpool_handle_t *zhp;
1327
1328                 /*
1329                  * This should never fail, but play it safe anyway.
1330                  */
1331                 if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1332                         ret = -1;
1333                 } else if (zhp != NULL) {
1334                         ret = zpool_create_zvol_links(zhp);
1335                         zpool_close(zhp);
1336                 }
1337
1338         }
1339
1340         zcmd_free_nvlists(&zc);
1341         nvlist_free(props);
1342
1343         return (ret);
1344 }
1345
1346 /*
1347  * Scrub the pool.
1348  */
1349 int
1350 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1351 {
1352         zfs_cmd_t zc = { 0 };
1353         char msg[1024];
1354         libzfs_handle_t *hdl = zhp->zpool_hdl;
1355
1356         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1357         zc.zc_cookie = type;
1358
1359         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1360                 return (0);
1361
1362         (void) snprintf(msg, sizeof (msg),
1363             dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1364
1365         if (errno == EBUSY)
1366                 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1367         else
1368                 return (zpool_standard_error(hdl, errno, msg));
1369 }
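/*
 * Illustrative sketch (not part of the original source): starting and
 * stopping a full scrub.  POOL_SCRUB_EVERYTHING and POOL_SCRUB_NONE are
 * assumed from the pool_scrub_type_t definitions in sys/fs/zfs.h of this
 * vintage; a resilver in progress yields EBUSY (EZFS_RESILVERING).
 */
#if 0
static int
example_start_scrub(zpool_handle_t *zhp)
{
        return (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING));
}

static int
example_stop_scrub(zpool_handle_t *zhp)
{
        return (zpool_scrub(zhp, POOL_SCRUB_NONE));
}
#endif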
1370
1371 /*
1372  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1373  * spare, but FALSE if it is an INUSE spare.
1374  */
1375 static nvlist_t *
1376 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1377     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1378 {
1379         uint_t c, children;
1380         nvlist_t **child;
1381         uint64_t theguid, present;
1382         char *path;
1383         uint64_t wholedisk = 0;
1384         nvlist_t *ret;
1385         uint64_t is_log;
1386
1387         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1388
1389         if (search == NULL &&
1390             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1391                 /*
1392                  * If the device has never been present since import, the only
1393                  * reliable way to match the vdev is by GUID.
1394                  */
1395                 if (theguid == guid)
1396                         return (nv);
1397         } else if (search != NULL &&
1398             nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1399                 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1400                     &wholedisk);
1401                 if (wholedisk) {
1402                         /*
1403                          * For whole disks, the internal path has 's0', but the
1404                          * path passed in by the user doesn't.
1405                          */
1406                         if (strlen(search) == strlen(path) - 2 &&
1407                             strncmp(search, path, strlen(search)) == 0)
1408                                 return (nv);
1409                 } else if (strcmp(search, path) == 0) {
1410                         return (nv);
1411                 }
1412         }
1413
1414         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1415             &child, &children) != 0)
1416                 return (NULL);
1417
1418         for (c = 0; c < children; c++) {
1419                 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1420                     avail_spare, l2cache, NULL)) != NULL) {
1421                         /*
1422                          * The 'is_log' value is only set for the toplevel
1423                          * vdev, not the leaf vdevs.  So we always lookup the
1424                          * log device from the root of the vdev tree (where
1425                          * 'log' is non-NULL).
1426                          */
1427                         if (log != NULL &&
1428                             nvlist_lookup_uint64(child[c],
1429                             ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1430                             is_log) {
1431                                 *log = B_TRUE;
1432                         }
1433                         return (ret);
1434                 }
1435         }
1436
1437         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1438             &child, &children) == 0) {
1439                 for (c = 0; c < children; c++) {
1440                         if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1441                             avail_spare, l2cache, NULL)) != NULL) {
1442                                 *avail_spare = B_TRUE;
1443                                 return (ret);
1444                         }
1445                 }
1446         }
1447
1448         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1449             &child, &children) == 0) {
1450                 for (c = 0; c < children; c++) {
1451                         if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1452                             avail_spare, l2cache, NULL)) != NULL) {
1453                                 *l2cache = B_TRUE;
1454                                 return (ret);
1455                         }
1456                 }
1457         }
1458
1459         return (NULL);
1460 }
1461
1462 nvlist_t *
1463 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1464     boolean_t *l2cache, boolean_t *log)
1465 {
1466         char buf[MAXPATHLEN];
1467         const char *search;
1468         char *end;
1469         nvlist_t *nvroot;
1470         uint64_t guid;
1471
1472         guid = strtoull(path, &end, 10);
1473         if (guid != 0 && *end == '\0') {
1474                 search = NULL;
1475         } else if (path[0] != '/') {
1476                 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1477                 search = buf;
1478         } else {
1479                 search = path;
1480         }
1481
1482         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1483             &nvroot) == 0);
1484
1485         *avail_spare = B_FALSE;
1486         *l2cache = B_FALSE;
1487         if (log != NULL)
1488                 *log = B_FALSE;
1489         return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1490             l2cache, log));
1491 }
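
/*
 * Usage sketch (editor's addition, not part of the original source): the
 * 'path' argument may be a numeric guid, a short device name (which gets
 * the /dev/dsk/ prefix above), or an absolute path.  "c1t0d0" is a
 * hypothetical device name.
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "c1t0d0", &spare, &l2cache,
 *	    &log)) == NULL)
 *		return (-1);	device is not part of this pool
 *	if (spare || l2cache)
 *		return (-1);	caller wants a regular vdev only
 */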
1492
1493 static int
1494 vdev_online(nvlist_t *nv)
1495 {
1496         uint64_t ival;
1497
1498         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1499             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1500             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1501                 return (0);
1502
1503         return (1);
1504 }
1505
1506 /*
1507  * Get phys_path for a root pool
1508  * Return 0 on success; non-zero on failure.
1509  */
1510 int
1511 zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
1512 {
1513         nvlist_t *vdev_root;
1514         nvlist_t **child;
1515         uint_t count;
1516         int i;
1517
1518         /*
1519          * Make sure this is a root pool, as phys_path doesn't mean
1520          * anything to a non-root pool.
1521          */
1522         if (!pool_is_bootable(zhp))
1523                 return (-1);
1524
1525         verify(nvlist_lookup_nvlist(zhp->zpool_config,
1526             ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
1527
1528         if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1529             &child, &count) != 0)
1530                 return (-2);
1531
1532         for (i = 0; i < count; i++) {
1533                 nvlist_t **child2;
1534                 uint_t count2;
1535                 char *type;
1536                 char *tmppath;
1537                 int j;
1538
1539                 if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
1540                     != 0)
1541                         return (-3);
1542
1543                 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1544                         if (!vdev_online(child[i]))
1545                                 return (-8);
1546                         verify(nvlist_lookup_string(child[i],
1547                             ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
1548                         (void) strcpy(physpath, tmppath);
1549                 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
1550                         if (nvlist_lookup_nvlist_array(child[i],
1551                             ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
1552                                 return (-4);
1553
1554                         for (j = 0; j < count2; j++) {
1555                                 if (!vdev_online(child2[j]))
1556                                         return (-8);
1557                                 if (nvlist_lookup_string(child2[j],
1558                                     ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
1559                                         return (-5);
1560
1561                                 if ((strlen(physpath) + strlen(tmppath)) >
1562                                     MAXNAMELEN)
1563                                         return (-6);
1564
1565                                 if (strlen(physpath) == 0) {
1566                                         (void) strcpy(physpath, tmppath);
1568                                 } else {
1569                                         (void) strcat(physpath, " ");
1570                                         (void) strcat(physpath, tmppath);
1571                                 }
1572                         }
1573                 } else {
1574                         return (-7);
1575                 }
1576         }
1577
1578         return (0);
1579 }
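
/*
 * Usage sketch (editor's addition): the caller must pass a zeroed buffer,
 * since the mirror case above tests strlen(physpath) and appends with
 * strcat(); combined lengths are checked against MAXNAMELEN.
 *
 *	char physpath[MAXNAMELEN + 1] = { 0 };
 *
 *	if (zpool_get_physpath(zhp, physpath) == 0)
 *		(void) printf("%s\n", physpath);
 */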
1580
1581 /*
1582  * Returns TRUE if the given guid corresponds to the given type.
1583  * This is used to check for hot spares (INUSE or not), and level 2 cache
1584  * devices.
1585  */
1586 static boolean_t
1587 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1588 {
1589         uint64_t target_guid;
1590         nvlist_t *nvroot;
1591         nvlist_t **list;
1592         uint_t count;
1593         int i;
1594
1595         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1596             &nvroot) == 0);
1597         if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1598                 for (i = 0; i < count; i++) {
1599                         verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1600                             &target_guid) == 0);
1601                         if (guid == target_guid)
1602                                 return (B_TRUE);
1603                 }
1604         }
1605
1606         return (B_FALSE);
1607 }
1608
1609 /*
1610  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1611  * ZFS_ONLINE_* flags.
1612  */
1613 int
1614 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1615     vdev_state_t *newstate)
1616 {
1617         zfs_cmd_t zc = { 0 };
1618         char msg[1024];
1619         nvlist_t *tgt;
1620         boolean_t avail_spare, l2cache;
1621         libzfs_handle_t *hdl = zhp->zpool_hdl;
1622
1623         (void) snprintf(msg, sizeof (msg),
1624             dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1625
1626         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1627         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1628             NULL)) == NULL)
1629                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1630
1631         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1632
1633         if (avail_spare ||
1634             is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1635                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1636
1637         zc.zc_cookie = VDEV_STATE_ONLINE;
1638         zc.zc_obj = flags;
1639
1640         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1641                 return (zpool_standard_error(hdl, errno, msg));
1642
1643         *newstate = zc.zc_cookie;
1644         return (0);
1645 }
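
/*
 * Usage sketch (editor's addition): bring a hypothetical device "c1t0d0"
 * back online with no special ZFS_ONLINE_* flags and check the resulting
 * state.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
 *	    newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device is back online but not healthy\n");
 */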
1646
1647 /*
1648  * Take the specified vdev offline
1649  */
1650 int
1651 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1652 {
1653         zfs_cmd_t zc = { 0 };
1654         char msg[1024];
1655         nvlist_t *tgt;
1656         boolean_t avail_spare, l2cache;
1657         libzfs_handle_t *hdl = zhp->zpool_hdl;
1658
1659         (void) snprintf(msg, sizeof (msg),
1660             dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1661
1662         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1663         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1664             NULL)) == NULL)
1665                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1666
1667         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1668
1669         if (avail_spare ||
1670             is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1671                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1672
1673         zc.zc_cookie = VDEV_STATE_OFFLINE;
1674         zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1675
1676         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1677                 return (0);
1678
1679         switch (errno) {
1680         case EBUSY:
1681
1682                 /*
1683                  * There are no other replicas of this device.
1684                  */
1685                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1686
1687         default:
1688                 return (zpool_standard_error(hdl, errno, msg));
1689         }
1690 }
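
/*
 * Usage sketch (editor's addition): passing istmp = B_TRUE maps to
 * ZFS_OFFLINE_TEMPORARY above, so the device comes back online at the
 * next import.
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) != 0)
 *		return (-1);	EZFS_NODEVICE, EZFS_NOREPLICAS, ...
 */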
1691
1692 /*
1693  * Mark the given vdev faulted.
1694  */
1695 int
1696 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1697 {
1698         zfs_cmd_t zc = { 0 };
1699         char msg[1024];
1700         libzfs_handle_t *hdl = zhp->zpool_hdl;
1701
1702         (void) snprintf(msg, sizeof (msg),
1703             dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
1704
1705         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1706         zc.zc_guid = guid;
1707         zc.zc_cookie = VDEV_STATE_FAULTED;
1708
1709         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1710                 return (0);
1711
1712         switch (errno) {
1713         case EBUSY:
1714
1715                 /*
1716                  * There are no other replicas of this device.
1717                  */
1718                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1719
1720         default:
1721                 return (zpool_standard_error(hdl, errno, msg));
1722         }
1723
1724 }
1725
1726 /*
1727  * Mark the given vdev degraded.
1728  */
1729 int
1730 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1731 {
1732         zfs_cmd_t zc = { 0 };
1733         char msg[1024];
1734         libzfs_handle_t *hdl = zhp->zpool_hdl;
1735
1736         (void) snprintf(msg, sizeof (msg),
1737             dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
1738
1739         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1740         zc.zc_guid = guid;
1741         zc.zc_cookie = VDEV_STATE_DEGRADED;
1742
1743         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1744                 return (0);
1745
1746         return (zpool_standard_error(hdl, errno, msg));
1747 }
1748
1749 /*
1750  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1751  * a hot spare.
1752  */
1753 static boolean_t
1754 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1755 {
1756         nvlist_t **child;
1757         uint_t c, children;
1758         char *type;
1759
1760         if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1761             &children) == 0) {
1762                 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1763                     &type) == 0);
1764
1765                 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1766                     children == 2 && child[which] == tgt)
1767                         return (B_TRUE);
1768
1769                 for (c = 0; c < children; c++)
1770                         if (is_replacing_spare(child[c], tgt, which))
1771                                 return (B_TRUE);
1772         }
1773
1774         return (B_FALSE);
1775 }
1776
1777 /*
1778  * Attach new_disk (fully described by nvroot) to old_disk.
1779  * If 'replacing' is specified, the new disk will replace the old one.
1780  */
1781 int
1782 zpool_vdev_attach(zpool_handle_t *zhp,
1783     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1784 {
1785         zfs_cmd_t zc = { 0 };
1786         char msg[1024];
1787         int ret;
1788         nvlist_t *tgt;
1789         boolean_t avail_spare, l2cache, islog;
1790         uint64_t val;
1791         char *path, *newname;
1792         nvlist_t **child;
1793         uint_t children;
1794         nvlist_t *config_root;
1795         libzfs_handle_t *hdl = zhp->zpool_hdl;
1796         boolean_t rootpool = pool_is_bootable(zhp);
1797
1798         if (replacing)
1799                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1800                     "cannot replace %s with %s"), old_disk, new_disk);
1801         else
1802                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1803                     "cannot attach %s to %s"), new_disk, old_disk);
1804
1805         /*
1806          * If this is a root pool, make sure that we're not attaching an
1807          * EFI labeled device.
1808          */
1809         if (rootpool && pool_uses_efi(nvroot)) {
1810                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1811                     "EFI labeled devices are not supported on root pools."));
1812                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1813         }
1814
1815         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1816         if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
1817             &islog)) == NULL)
1818                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1819
1820         if (avail_spare)
1821                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1822
1823         if (l2cache)
1824                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1825
1826         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1827         zc.zc_cookie = replacing;
1828
1829         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1830             &child, &children) != 0 || children != 1) {
1831                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1832                     "new device must be a single disk"));
1833                 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1834         }
1835
1836         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1837             ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1838
1839         if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1840                 return (-1);
1841
1842         /*
1843          * If the target is a hot spare that has been swapped in, we can only
1844          * replace it with another hot spare.
1845          */
1846         if (replacing &&
1847             nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1848             (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
1849             NULL) == NULL || !avail_spare) &&
1850             is_replacing_spare(config_root, tgt, 1)) {
1851                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1852                     "can only be replaced by another hot spare"));
1853                 free(newname);
1854                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1855         }
1856
1857         /*
1858          * If we are attempting to replace a spare, it cannot be applied to an
1859          * already spared device.
1860          */
1861         if (replacing &&
1862             nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1863             zpool_find_vdev(zhp, newname, &avail_spare,
1864             &l2cache, NULL) != NULL && avail_spare &&
1865             is_replacing_spare(config_root, tgt, 0)) {
1866                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1867                     "device has already been replaced with a spare"));
1868                 free(newname);
1869                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1870         }
1871
1872         free(newname);
1873
1874         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1875                 return (-1);
1876
1877         ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1878
1879         zcmd_free_nvlists(&zc);
1880
1881         if (ret == 0) {
1882                 if (rootpool) {
1883                         /*
1884                          * XXX - This should be removed once we can
1885                          * automatically install the bootblocks on the
1886                          * newly attached disk.
1887                          */
1888                         (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
1889                             "be sure to invoke %s to make '%s' bootable.\n"),
1890                             BOOTCMD, new_disk);
1891                 }
1892                 return (0);
1893         }
1894
1895         switch (errno) {
1896         case ENOTSUP:
1897                 /*
1898                  * Can't attach to or replace this type of vdev.
1899                  */
1900                 if (replacing) {
1901                         if (islog)
1902                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1903                                     "cannot replace a log with a spare"));
1904                         else
1905                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1906                                     "cannot replace a replacing device"));
1907                 } else {
1908                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1909                             "can only attach to mirrors and top-level "
1910                             "disks"));
1911                 }
1912                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
1913                 break;
1914
1915         case EINVAL:
1916                 /*
1917                  * The new device must be a single disk.
1918                  */
1919                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1920                     "new device must be a single disk"));
1921                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1922                 break;
1923
1924         case EBUSY:
1925                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1926                     new_disk);
1927                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1928                 break;
1929
1930         case EOVERFLOW:
1931                 /*
1932                  * The new device is too small.
1933                  */
1934                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1935                     "device is too small"));
1936                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1937                 break;
1938
1939         case EDOM:
1940                 /*
1941                  * The new device has a different alignment requirement.
1942                  */
1943                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1944                     "devices have different sector alignment"));
1945                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1946                 break;
1947
1948         case ENAMETOOLONG:
1949                 /*
1950                  * The resulting top-level vdev spec won't fit in the label.
1951                  */
1952                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1953                 break;
1954
1955         default:
1956                 (void) zpool_standard_error(hdl, errno, msg);
1957         }
1958
1959         return (-1);
1960 }
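
/*
 * Usage sketch (editor's addition): 'nvroot' must describe exactly one
 * new disk (the children != 1 check above enforces this); zpool(1M)
 * builds it from its command line.  Passing replacing = 1 turns the
 * attach into a replacement of old_disk.
 *
 *	if (zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1) != 0)
 *		return (-1);	a specific message was already emitted
 */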
1961
1962 /*
1963  * Detach the specified device.
1964  */
1965 int
1966 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1967 {
1968         zfs_cmd_t zc = { 0 };
1969         char msg[1024];
1970         nvlist_t *tgt;
1971         boolean_t avail_spare, l2cache;
1972         libzfs_handle_t *hdl = zhp->zpool_hdl;
1973
1974         (void) snprintf(msg, sizeof (msg),
1975             dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1976
1977         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1978         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1979             NULL)) == NULL)
1980                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1981
1982         if (avail_spare)
1983                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1984
1985         if (l2cache)
1986                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1987
1988         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1989
1990         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1991                 return (0);
1992
1993         switch (errno) {
1994
1995         case ENOTSUP:
1996                 /*
1997                  * Can't detach from this type of vdev.
1998                  */
1999                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2000                     "applicable to mirror and replacing vdevs"));
2001                 (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2002                 break;
2003
2004         case EBUSY:
2005                 /*
2006                  * There are no other replicas of this device.
2007                  */
2008                 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2009                 break;
2010
2011         default:
2012                 (void) zpool_standard_error(hdl, errno, msg);
2013         }
2014
2015         return (-1);
2016 }
2017
2018 /*
2019  * Remove the given device.  Currently, this is supported only for hot spares
2020  * and level 2 cache devices.
2021  */
2022 int
2023 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2024 {
2025         zfs_cmd_t zc = { 0 };
2026         char msg[1024];
2027         nvlist_t *tgt;
2028         boolean_t avail_spare, l2cache;
2029         libzfs_handle_t *hdl = zhp->zpool_hdl;
2030
2031         (void) snprintf(msg, sizeof (msg),
2032             dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2033
2034         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2035         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2036             NULL)) == NULL)
2037                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2038
2039         if (!avail_spare && !l2cache) {
2040                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2041                     "only inactive hot spares or cache devices "
2042                     "can be removed"));
2043                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2044         }
2045
2046         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2047
2048         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2049                 return (0);
2050
2051         return (zpool_standard_error(hdl, errno, msg));
2052 }
2053
2054 /*
2055  * Clear the errors for the pool, or the particular device if specified.
2056  */
2057 int
2058 zpool_clear(zpool_handle_t *zhp, const char *path)
2059 {
2060         zfs_cmd_t zc = { 0 };
2061         char msg[1024];
2062         nvlist_t *tgt;
2063         boolean_t avail_spare, l2cache;
2064         libzfs_handle_t *hdl = zhp->zpool_hdl;
2065
2066         if (path)
2067                 (void) snprintf(msg, sizeof (msg),
2068                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2069                     path);
2070         else
2071                 (void) snprintf(msg, sizeof (msg),
2072                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2073                     zhp->zpool_name);
2074
2075         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2076         if (path) {
2077                 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2078                     &l2cache, NULL)) == NULL)
2079                         return (zfs_error(hdl, EZFS_NODEVICE, msg));
2080
2081                 /*
2082                  * Don't allow error clearing for hot spares.  Do allow
2083                  * error clearing for l2cache devices.
2084                  */
2085                 if (avail_spare)
2086                         return (zfs_error(hdl, EZFS_ISSPARE, msg));
2087
2088                 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2089                     &zc.zc_guid) == 0);
2090         }
2091
2092         if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2093                 return (0);
2094
2095         return (zpool_standard_error(hdl, errno, msg));
2096 }
2097
2098 /*
2099  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2100  */
2101 int
2102 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2103 {
2104         zfs_cmd_t zc = { 0 };
2105         char msg[1024];
2106         libzfs_handle_t *hdl = zhp->zpool_hdl;
2107
2108         (void) snprintf(msg, sizeof (msg),
2109             dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2110             (u_longlong_t)guid);
2111
2112         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2113         zc.zc_guid = guid;
2114
2115         if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2116                 return (0);
2117
2118         return (zpool_standard_error(hdl, errno, msg));
2119 }
2120
2121 /*
2122  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
2123  * hierarchy.
2124  */
2125 int
2126 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
2127     void *data)
2128 {
2129         libzfs_handle_t *hdl = zhp->zpool_hdl;
2130         char (*paths)[MAXPATHLEN];
2131         size_t size = 4;
2132         int curr, fd, base, ret = 0;
2133         DIR *dirp;
2134         struct dirent *dp;
2135         struct stat st;
2136
2137         if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
2138                 return (errno == ENOENT ? 0 : -1);
2139
2140         if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
2141                 int err = errno;
2142                 (void) close(base);
2143                 return (err == ENOENT ? 0 : -1);
2144         }
2145
2146         /*
2147          * Oddly this wasn't a directory -- ignore that failure since we
2148          * know there are no links lower in the (non-existent) hierarchy.
2149          */
2150         if (!S_ISDIR(st.st_mode)) {
2151                 (void) close(base);
2152                 return (0);
2153         }
2154
2155         if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
2156                 (void) close(base);
2157                 return (-1);
2158         }
2159
2160         (void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
2161         curr = 0;
2162
2163         while (curr >= 0) {
2164                 if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
2165                         goto err;
2166
2167                 if (S_ISDIR(st.st_mode)) {
2168                         if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
2169                                 goto err;
2170
2171                         if ((dirp = fdopendir(fd)) == NULL) {
2172                                 (void) close(fd);
2173                                 goto err;
2174                         }
2175
2176                         while ((dp = readdir(dirp)) != NULL) {
2177                                 if (dp->d_name[0] == '.')
2178                                         continue;
2179
2180                                 if (curr + 1 == size) {
2181                                         paths = zfs_realloc(hdl, paths,
2182                                             size * sizeof (paths[0]),
2183                                             size * 2 * sizeof (paths[0]));
2184                                         if (paths == NULL) {
2185                                                 (void) closedir(dirp);
2186                                                 (void) close(fd);
2187                                                 goto err;
2188                                         }
2189
2190                                         size *= 2;
2191                                 }
2192
2193                                 (void) strlcpy(paths[curr + 1], paths[curr],
2194                                     sizeof (paths[curr + 1]));
2195                                 (void) strlcat(paths[curr], "/",
2196                                     sizeof (paths[curr]));
2197                                 (void) strlcat(paths[curr], dp->d_name,
2198                                     sizeof (paths[curr]));
2199                                 curr++;
2200                         }
2201
2202                         (void) closedir(dirp);
2203
2204                 } else {
2205                         if ((ret = cb(paths[curr], data)) != 0)
2206                                 break;
2207                 }
2208
2209                 curr--;
2210         }
2211
2212         free(paths);
2213         (void) close(base);
2214
2215         return (ret);
2216
2217 err:
2218         free(paths);
2219         (void) close(base);
2220         return (-1);
2221 }
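
/*
 * Usage sketch (editor's addition): a callback suitable for
 * zpool_iter_zvol(); it is handed each name found under
 * /dev/zvol/dsk/<pool> along with the opaque 'data' pointer, and a
 * nonzero return stops the walk.
 *
 *	static int
 *	print_zvol(const char *dataset, void *data)
 *	{
 *		(void) printf("%s\n", dataset);
 *		return (0);
 *	}
 *
 *	(void) zpool_iter_zvol(zhp, print_zvol, NULL);
 */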
2222
2223 typedef struct zvol_cb {
2224         zpool_handle_t *zcb_pool;
2225         boolean_t zcb_create;
2226 } zvol_cb_t;
2227
2228 /*ARGSUSED*/
2229 static int
2230 do_zvol_create(zfs_handle_t *zhp, void *data)
2231 {
2232         int ret = 0;
2233
2234         if (ZFS_IS_VOLUME(zhp)) {
2235                 (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2236                 ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2237         }
2238
2239         if (ret == 0)
2240                 ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
2241
2242         zfs_close(zhp);
2243
2244         return (ret);
2245 }
2246
2247 /*
2248  * Iterate over all zvols in the pool and make any necessary minor nodes.
2249  */
2250 int
2251 zpool_create_zvol_links(zpool_handle_t *zhp)
2252 {
2253         zfs_handle_t *zfp;
2254         int ret;
2255
2256         /*
2257          * If the pool is unavailable, just return success.
2258          */
2259         if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2260             zhp->zpool_name)) == NULL)
2261                 return (0);
2262
2263         ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2264
2265         zfs_close(zfp);
2266         return (ret);
2267 }
2268
2269 static int
2270 do_zvol_remove(const char *dataset, void *data)
2271 {
2272         zpool_handle_t *zhp = data;
2273
2274         return (zvol_remove_link(zhp->zpool_hdl, dataset));
2275 }
2276
2277 /*
2278  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
2279  * by examining the /dev links so that a corrupted pool doesn't impede this
2280  * operation.
2281  */
2282 int
2283 zpool_remove_zvol_links(zpool_handle_t *zhp)
2284 {
2285         return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2286 }
2287
2288 /*
2289  * Convert from a devid string to a path.
2290  */
2291 static char *
2292 devid_to_path(char *devid_str)
2293 {
2294         ddi_devid_t devid;
2295         char *minor;
2296         char *path;
2297         devid_nmlist_t *list = NULL;
2298         int ret;
2299
2300         if (devid_str_decode(devid_str, &devid, &minor) != 0)
2301                 return (NULL);
2302
2303         ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2304
2305         devid_str_free(minor);
2306         devid_free(devid);
2307
2308         if (ret != 0)
2309                 return (NULL);
2310
2311         if ((path = strdup(list[0].devname)) == NULL)
2312                 return (NULL);
2313
2314         devid_free_nmlist(list);
2315
2316         return (path);
2317 }
2318
2319 /*
2320  * Convert from a path to a devid string.
2321  */
2322 static char *
2323 path_to_devid(const char *path)
2324 {
2325         int fd;
2326         ddi_devid_t devid;
2327         char *minor, *ret;
2328
2329         if ((fd = open(path, O_RDONLY)) < 0)
2330                 return (NULL);
2331
2332         minor = NULL;
2333         ret = NULL;
2334         if (devid_get(fd, &devid) == 0) {
2335                 if (devid_get_minor_name(fd, &minor) == 0)
2336                         ret = devid_str_encode(devid, minor);
2337                 if (minor != NULL)
2338                         devid_str_free(minor);
2339                 devid_free(devid);
2340         }
2341         (void) close(fd);
2342
2343         return (ret);
2344 }
2345
2346 /*
2347  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2348  * ignore any failure here, since a common case is for an unprivileged user to
2349  * type 'zpool status', and we'll display the correct information anyway.
2350  */
2351 static void
2352 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2353 {
2354         zfs_cmd_t zc = { 0 };
2355
2356         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2357         (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2358         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2359             &zc.zc_guid) == 0);
2360
2361         (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2362 }
2363
2364 /*
2365  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2366  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2367  * We also check if this is a whole disk, in which case we strip off the
2368  * trailing 's0' slice name.
2369  *
2370  * This routine is also responsible for identifying when disks have been
2371  * reconfigured in a new location.  The kernel will have opened the device by
2372  * devid, but the path will still refer to the old location.  To catch this, we
2373  * first do a path -> devid translation (which is fast for the common case).  If
2374  * the devid matches, we're done.  If not, we do a reverse devid -> path
2375  * translation and issue the appropriate ioctl() to update the path of the vdev.
2376  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2377  * of these checks.
2378  */
2379 char *
2380 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2381 {
2382         char *path, *devid;
2383         uint64_t value;
2384         char buf[64];
2385         vdev_stat_t *vs;
2386         uint_t vsc;
2387
2388         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2389             &value) == 0) {
2390                 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2391                     &value) == 0);
2392                 (void) snprintf(buf, sizeof (buf), "%llu",
2393                     (u_longlong_t)value);
2394                 path = buf;
2395         } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2396
2397                 /*
2398                  * If the device is dead (faulted, offline, etc) then don't
2399                  * bother opening it.  Otherwise we may be forcing the user to
2400                  * open a misbehaving device, which can have undesirable
2401                  * effects.
2402                  */
2403                 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2404                     (uint64_t **)&vs, &vsc) != 0 ||
2405                     vs->vs_state >= VDEV_STATE_DEGRADED) &&
2406                     zhp != NULL &&
2407                     nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2408                         /*
2409                          * Determine if the current path is correct.
2410                          */
2411                         char *newdevid = path_to_devid(path);
2412
2413                         if (newdevid == NULL ||
2414                             strcmp(devid, newdevid) != 0) {
2415                                 char *newpath;
2416
2417                                 if ((newpath = devid_to_path(devid)) != NULL) {
2418                                         /*
2419                                          * Update the path appropriately.
2420                                          */
2421                                         set_path(zhp, nv, newpath);
2422                                         if (nvlist_add_string(nv,
2423                                             ZPOOL_CONFIG_PATH, newpath) == 0)
2424                                                 verify(nvlist_lookup_string(nv,
2425                                                     ZPOOL_CONFIG_PATH,
2426                                                     &path) == 0);
2427                                         free(newpath);
2428                                 }
2429                         }
2430
2431                         if (newdevid)
2432                                 devid_str_free(newdevid);
2433                 }
2434
2435                 if (strncmp(path, "/dev/dsk/", 9) == 0)
2436                         path += 9;
2437
2438                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2439                     &value) == 0 && value) {
2440                         char *tmp = zfs_strdup(hdl, path);
2441                         if (tmp == NULL)
2442                                 return (NULL);
2443                         tmp[strlen(path) - 2] = '\0';
2444                         return (tmp);
2445                 }
2446         } else {
2447                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2448
2449                 /*
2450                  * If it's a raidz device, we need to stick in the parity level.
2451                  */
2452                 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2453                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2454                             &value) == 0);
2455                         (void) snprintf(buf, sizeof (buf), "%s%llu", path,
2456                             (u_longlong_t)value);
2457                         path = buf;
2458                 }
2459         }
2460
2461         return (zfs_strdup(hdl, path));
2462 }
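
/*
 * Usage sketch (editor's addition): the returned name is allocated with
 * zfs_strdup() and must be freed by the caller.
 *
 *	char *name;
 *
 *	if ((name = zpool_vdev_name(hdl, zhp, nv)) != NULL) {
 *		(void) printf("%s\n", name);
 *		free(name);
 *	}
 */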
2463
2464 static int
2465 zbookmark_compare(const void *a, const void *b)
2466 {
2467         return (memcmp(a, b, sizeof (zbookmark_t)));
2468 }
2469
2470 /*
2471  * Retrieve the persistent error log, uniquify the members, and return to the
2472  * caller.
2473  */
2474 int
2475 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2476 {
2477         zfs_cmd_t zc = { 0 };
2478         uint64_t count;
2479         zbookmark_t *zb = NULL;
2480         int i;
2481
2482         /*
2483          * Retrieve the raw error list from the kernel.  If the number of errors
2484          * has increased, allocate more space and continue until we get the
2485          * entire list.
2486          */
2487         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2488             &count) == 0);
2489         if (count == 0)
2490                 return (0);
2491         if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2492             count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2493                 return (-1);
2494         zc.zc_nvlist_dst_size = count;
2495         (void) strcpy(zc.zc_name, zhp->zpool_name);
2496         for (;;) {
2497                 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2498                     &zc) != 0) {
2499                         free((void *)(uintptr_t)zc.zc_nvlist_dst);
2500                         if (errno == ENOMEM) {
2501                                 count = zc.zc_nvlist_dst_size;
2502                                 if ((zc.zc_nvlist_dst = (uintptr_t)
2503                                     zfs_alloc(zhp->zpool_hdl, count *
2504                                     sizeof (zbookmark_t))) == (uintptr_t)NULL)
2505                                         return (-1);
2506                         } else {
2507                                 return (-1);
2508                         }
2509                 } else {
2510                         break;
2511                 }
2512         }
2513
2514         /*
2515          * Sort the resulting bookmarks.  This is a little confusing due to the
2516          * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2517          * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2518          * _not_ copied as part of the process.  So we point the start of our
2519          * array appropriately and decrement the total number of elements.
2520          */
2521         zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2522             zc.zc_nvlist_dst_size;
2523         count -= zc.zc_nvlist_dst_size;
2524
2525         qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2526
2527         verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2528
2529         /*
2530          * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2531          */
2532         for (i = 0; i < count; i++) {
2533                 nvlist_t *nv;
2534
2535                 /* ignoring zb_blkid and zb_level for now */
2536                 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2537                     zb[i-1].zb_object == zb[i].zb_object)
2538                         continue;
2539
2540                 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2541                         goto nomem;
2542                 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2543                     zb[i].zb_objset) != 0) {
2544                         nvlist_free(nv);
2545                         goto nomem;
2546                 }
2547                 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2548                     zb[i].zb_object) != 0) {
2549                         nvlist_free(nv);
2550                         goto nomem;
2551                 }
2552                 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2553                         nvlist_free(nv);
2554                         goto nomem;
2555                 }
2556                 nvlist_free(nv);
2557         }
2558
2559         free((void *)(uintptr_t)zc.zc_nvlist_dst);
2560         return (0);
2561
2562 nomem:
2563         free((void *)(uintptr_t)zc.zc_nvlist_dst);
2564         return (no_memory(zhp->zpool_hdl));
2565 }
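
/*
 * Usage sketch (editor's addition): walk the uniquified error list with
 * libnvpair and turn each (dataset, object) pair into a path with
 * zpool_obj_to_path() below.  Note that *nverrlistp is left untouched
 * when the pool has no errors, so it must be preset to NULL.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	char pathname[MAXPATHLEN];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0 && nverrlist != NULL) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *			    sizeof (pathname));
 *			(void) printf("%s\n", pathname);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */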
2566
2567 /*
2568  * Upgrade a ZFS pool to the latest on-disk version.
2569  */
2570 int
2571 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2572 {
2573         zfs_cmd_t zc = { 0 };
2574         libzfs_handle_t *hdl = zhp->zpool_hdl;
2575
2576         (void) strcpy(zc.zc_name, zhp->zpool_name);
2577         zc.zc_cookie = new_version;
2578
2579         if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2580                 return (zpool_standard_error_fmt(hdl, errno,
2581                     dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2582                     zhp->zpool_name));
2583         return (0);
2584 }
2585
2586 void
2587 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2588     char *history_str)
2589 {
2590         int i;
2591
2592         (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2593         for (i = 1; i < argc; i++) {
2594                 if (strlen(history_str) + 1 + strlen(argv[i]) >
2595                     HIS_MAX_RECORD_LEN)
2596                         break;
2597                 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2598                 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2599         }
2600 }
2601
2602 /*
2603  * Stage command history for logging.
2604  */
2605 int
2606 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2607 {
2608         if (history_str == NULL)
2609                 return (EINVAL);
2610
2611         if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2612                 return (EINVAL);
2613
2614         if (hdl->libzfs_log_str != NULL)
2615                 free(hdl->libzfs_log_str);
2616
2617         if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2618                 return (no_memory(hdl));
2619
2620         return (0);
2621 }
2622
2623 /*
2624  * Perform ioctl to get some command history of a pool.
2625  *
2626  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2627  * logical offset of the history buffer to start reading from.
2628  *
2629  * Upon return, 'off' is the next logical offset to read from and
2630  * 'len' is the actual number of bytes read into 'buf'.
2631  */
2632 static int
2633 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2634 {
2635         zfs_cmd_t zc = { 0 };
2636         libzfs_handle_t *hdl = zhp->zpool_hdl;
2637
2638         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2639
2640         zc.zc_history = (uint64_t)(uintptr_t)buf;
2641         zc.zc_history_len = *len;
2642         zc.zc_history_offset = *off;
2643
2644         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2645                 switch (errno) {
2646                 case EPERM:
2647                         return (zfs_error_fmt(hdl, EZFS_PERM,
2648                             dgettext(TEXT_DOMAIN,
2649                             "cannot show history for pool '%s'"),
2650                             zhp->zpool_name));
2651                 case ENOENT:
2652                         return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2653                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
2654                             "'%s'"), zhp->zpool_name));
2655                 case ENOTSUP:
2656                         return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2657                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
2658                             "'%s', pool must be upgraded"), zhp->zpool_name));
2659                 default:
2660                         return (zpool_standard_error_fmt(hdl, errno,
2661                             dgettext(TEXT_DOMAIN,
2662                             "cannot get history for '%s'"), zhp->zpool_name));
2663                 }
2664         }
2665
2666         *len = zc.zc_history_len;
2667         *off = zc.zc_history_offset;
2668
2669         return (0);
2670 }
2671
2672 /*
2673  * Process the buffer of nvlists, unpacking and storing each nvlist record
2674  * into 'records'.  'leftover' is set to the number of bytes that weren't
2675  * processed as there wasn't a complete record.
2676  */
2677 static int
2678 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2679     nvlist_t ***records, uint_t *numrecords)
2680 {
2681         uint64_t reclen;
2682         nvlist_t *nv;
2683         int i;
2684
2685         while (bytes_read > sizeof (reclen)) {
2686
2687                 /* get length of packed record (stored as little endian) */
2688                 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2689                         reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2690
2691                 if (bytes_read < sizeof (reclen) + reclen)
2692                         break;
2693
2694                 /* unpack record */
2695                 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2696                         return (ENOMEM);
2697                 bytes_read -= sizeof (reclen) + reclen;
2698                 buf += sizeof (reclen) + reclen;
2699
2700                 /* add record to nvlist array */
2701                 (*numrecords)++;
2702                 if (ISP2(*numrecords + 1)) {
2703                         nvlist_t **tmp = realloc(*records,
2704                             *numrecords * 2 * sizeof (nvlist_t *));
                        if (tmp == NULL) {
                                nvlist_free(nv);
                                return (ENOMEM);
                        }
                        *records = tmp;
2705                 }
2706                 (*records)[*numrecords - 1] = nv;
2707         }
2708
2709         *leftover = bytes_read;
2710         return (0);
2711 }
2712
2713 #define HIS_BUF_LEN     (128*1024)
2714
2715 /*
2716  * Retrieve the command history of a pool.
2717  */
2718 int
2719 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2720 {
2721         char buf[HIS_BUF_LEN];
2722         uint64_t off = 0;
2723         nvlist_t **records = NULL;
2724         uint_t numrecords = 0;
2725         int err, i;
2726
2727         do {
2728                 uint64_t bytes_read = sizeof (buf);
2729                 uint64_t leftover;
2730
2731                 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2732                         break;
2733
2734                 /* if nothing else was read in, we're at EOF, just return */
2735                 if (!bytes_read)
2736                         break;
2737
2738                 if ((err = zpool_history_unpack(buf, bytes_read,
2739                     &leftover, &records, &numrecords)) != 0)
2740                         break;
2741                 off -= leftover;
2742
2743                 /* CONSTCOND */
2744         } while (1);
2745
2746         if (!err) {
2747                 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2748                 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2749                     records, numrecords) == 0);
2750         }
2751         for (i = 0; i < numrecords; i++)
2752                 nvlist_free(records[i]);
2753         free(records);
2754
2755         return (err);
2756 }
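
/*
 * Usage sketch (editor's addition): print the time and command of each
 * record.  ZPOOL_HIST_TIME and ZPOOL_HIST_CMD are the fields zpool(1M)
 * displays; treat their presence as optional, since internally logged
 * records may omit ZPOOL_HIST_CMD.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t numrecords, i;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			uint64_t tm;
 *			char *cmd;
 *
 *			if (nvlist_lookup_uint64(records[i],
 *			    ZPOOL_HIST_TIME, &tm) == 0 &&
 *			    nvlist_lookup_string(records[i],
 *			    ZPOOL_HIST_CMD, &cmd) == 0)
 *				(void) printf("%llu %s\n",
 *				    (u_longlong_t)tm, cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */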
2757
2758 void
2759 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2760     char *pathname, size_t len)
2761 {
2762         zfs_cmd_t zc = { 0 };
2763         boolean_t mounted = B_FALSE;
2764         char *mntpnt = NULL;
2765         char dsname[MAXNAMELEN];
2766
2767         if (dsobj == 0) {
2768                 /* special case for the MOS */
2769                 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", (u_longlong_t)obj);
2770                 return;
2771         }
2772
2773         /* get the dataset's name */
2774         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2775         zc.zc_obj = dsobj;
2776         if (ioctl(zhp->zpool_hdl->libzfs_fd,
2777             ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2778                 /* just write out a path of two object numbers */
2779                 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2780                     (u_longlong_t)dsobj, (u_longlong_t)obj);
2781                 return;
2782         }
2783         (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2784
2785         /* find out if the dataset is mounted */
2786         mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2787
2788         /* get the corrupted object's path */
2789         (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2790         zc.zc_obj = obj;
2791         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2792             &zc) == 0) {
2793                 if (mounted) {
2794                         (void) snprintf(pathname, len, "%s%s", mntpnt,
2795                             zc.zc_value);
2796                 } else {
2797                         (void) snprintf(pathname, len, "%s:%s",
2798                             dsname, zc.zc_value);
2799                 }
2800         } else {
2801                 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (u_longlong_t)obj);
2802         }
2803         free(mntpnt);
2804 }
2805
2806 #define RDISK_ROOT      "/dev/rdsk"
2807 #define BACKUP_SLICE    "s2"
2808 /*
2809  * Don't start the slice at the default block of 34; many storage devices
2810  * use a stripe width of 128k (256 x 512-byte sectors), so start there instead.
2811  */
2812 #define NEW_START_BLOCK 256
2813
2814 /*
2815  * Read the EFI label from the config, if a label does not exist then
2816  * pass back the error to the caller. If the caller has passed a non-NULL
2817  * diskaddr argument then we set it to the starting address of the EFI
2818  * partition.
2819  */
2820 static int
2821 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2822 {
2823         char *path;
2824         int fd;
2825         char diskname[MAXPATHLEN];
2826         int err = -1;
2827
2828         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2829                 return (err);
2830
2831         (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2832             strrchr(path, '/'));
2833         if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2834                 struct dk_gpt *vtoc;
2835
2836                 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2837                         if (sb != NULL)
2838                                 *sb = vtoc->efi_parts[0].p_start;
2839                         efi_free(vtoc);
2840                 }
2841                 (void) close(fd);
2842         }
2843         return (err);
2844 }
2845
2846 /*
2847  * determine where a partition starts on a disk in the current
2848  * configuration
2849  */
2850 static diskaddr_t
2851 find_start_block(nvlist_t *config)
2852 {
2853         nvlist_t **child;
2854         uint_t c, children;
2855         diskaddr_t sb = MAXOFFSET_T;
2856         uint64_t wholedisk;
2857
2858         if (nvlist_lookup_nvlist_array(config,
2859             ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2860                 if (nvlist_lookup_uint64(config,
2861                     ZPOOL_CONFIG_WHOLE_DISK,
2862                     &wholedisk) != 0 || !wholedisk) {
2863                         return (MAXOFFSET_T);
2864                 }
2865                 if (read_efi_label(config, &sb) < 0)
2866                         sb = MAXOFFSET_T;
2867                 return (sb);
2868         }
2869
2870         for (c = 0; c < children; c++) {
2871                 sb = find_start_block(child[c]);
2872                 if (sb != MAXOFFSET_T) {
2873                         return (sb);
2874                 }
2875         }
2876         return (MAXOFFSET_T);
2877 }
2878
2879 /*
2880  * Label an individual disk.  The name provided is the short name,
2881  * stripped of any leading /dev path.
2882  */
2883 int
2884 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2885 {
2886         char path[MAXPATHLEN];
2887         struct dk_gpt *vtoc;
2888         int fd;
2889         size_t resv = EFI_MIN_RESV_SIZE;
2890         uint64_t slice_size;
2891         diskaddr_t start_block;
2892         char errbuf[1024];
2893
2894         /* prepare an error message just in case */
2895         (void) snprintf(errbuf, sizeof (errbuf),
2896             dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2897
2898         if (zhp) {
2899                 nvlist_t *nvroot;
2900
2901                 if (pool_is_bootable(zhp)) {
2902                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2903                             "EFI labeled devices are not supported on root "
2904                             "pools."));
2905                         return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
2906                 }
2907
2908                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2909                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2910
2911                 if (zhp->zpool_start_block == 0)
2912                         start_block = find_start_block(nvroot);
2913                 else
2914                         start_block = zhp->zpool_start_block;
2915                 zhp->zpool_start_block = start_block;
2916         } else {
2917                 /* new pool */
2918                 start_block = NEW_START_BLOCK;
2919         }
2920
2921         (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2922             BACKUP_SLICE);
2923
2924         if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2925                 /*
2926                  * This shouldn't happen.  We've long since verified that this
2927                  * is a valid device.
2928                  */
2929                 zfs_error_aux(hdl,
2930                     dgettext(TEXT_DOMAIN, "unable to open device"));
2931                 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2932         }
2933
2934         if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2935                 /*
2936                  * The only way this can fail is if we run out of memory, or we
2937                  * were unable to read the disk's capacity
2938                  */
2939                 if (errno == ENOMEM)
2940                         (void) no_memory(hdl);
2941
2942                 (void) close(fd);
2943                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2944                     "unable to read capacity of disk '%s'"), name);
2945
2946                 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2947         }
2948
2949         slice_size = vtoc->efi_last_u_lba + 1;
2950         slice_size -= EFI_MIN_RESV_SIZE;
2951         if (start_block == MAXOFFSET_T)
2952                 start_block = NEW_START_BLOCK;
2953         slice_size -= start_block;
2954
2955         vtoc->efi_parts[0].p_start = start_block;
2956         vtoc->efi_parts[0].p_size = slice_size;
2957
2958         /*
2959          * Why we use V_USR: V_BACKUP confuses users, and is considered
2960          * disposable by some EFI utilities (since EFI doesn't have a backup
2961          * slice).  V_UNASSIGNED is supposed to be used only for zero size
2962          * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2963          * etc. were all pretty specific.  V_USR is as close to reality as we
2964          * can get, in the absence of V_OTHER.
2965          */
2966         vtoc->efi_parts[0].p_tag = V_USR;
2967         (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2968
2969         vtoc->efi_parts[8].p_start = slice_size + start_block;
2970         vtoc->efi_parts[8].p_size = resv;
2971         vtoc->efi_parts[8].p_tag = V_RESERVED;
2972
2973         if (efi_write(fd, vtoc) != 0) {
2974                 /*
2975                  * Some block drivers (like pcata) may not support EFI
2976                  * GPT labels.  Print out a helpful error message
2977                  * directing the user to manually label the disk and
2978                  * give a specific slice.
2979                  */
2980                 (void) close(fd);
2981                 efi_free(vtoc);
2982
2983                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2984                     "try using fdisk(1M) and then provide a specific slice"));
2985                 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2986         }
2987
2988         (void) close(fd);
2989         efi_free(vtoc);
2990         return (0);
2991 }
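
/*
 * Usage sketch (editor's addition): label a whole disk before it is added
 * to a pool.  'name' is the short device name (e.g. the hypothetical
 * "c1t0d0"); the function opens the backup slice under /dev/rdsk itself.
 * Pass a NULL zhp when the disk is destined for a brand-new pool.
 *
 *	if (zpool_label_disk(hdl, NULL, "c1t0d0") != 0)
 *		return (-1);	an error was already printed via hdl
 */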
2992
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Check whether this zvol is allowable for use as a dump device; returns
 * zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
 */
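/*
 * For example, a hypothetical caller validating a candidate dump device
 * might do (sketch only; "tank/dump" is an assumed zvol name):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") == 0)
 *		(void) printf("zvol is usable as a dump device\n");
 */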
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen) != 0) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

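	/* strip off the leading /dev path to get the dataset name */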
	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

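	/* dump requires a pool with exactly one top-level vdev */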
	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
		goto out;
	}
	ret = 0;

out:
	if (zhp)
		zpool_close(zhp);
	libzfs_fini(hdl);
	return (ret);
}