/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

typedef struct prop_flags {
    int create:1;   /* Validate property on creation */
    int import:1;   /* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
        return (-1);

    while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
        if (errno == ENOMEM) {
            if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                zcmd_free_nvlists(&zc);
                return (-1);
            }
        } else {
            zcmd_free_nvlists(&zc);
            return (-1);
        }
    }

    if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
        zcmd_free_nvlists(&zc);
        return (-1);
    }

    zcmd_free_nvlists(&zc);
    return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
    nvlist_t *old_props;

    old_props = zhp->zpool_props;

    if (zpool_get_all_props(zhp) != 0)
        return (-1);

    nvlist_free(old_props);
    return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
    nvlist_t *nv, *nvl;
    uint64_t ival;
    char *value;
    zprop_source_t source;

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
        source = ival;
        verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
    } else {
        source = ZPROP_SRC_DEFAULT;
        if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
            value = "-";
    }

    if (src)
        *src = source;

    return (value);
}
static uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
    nvlist_t *nv, *nvl;
    uint64_t value;
    zprop_source_t source;

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
        /*
         * zpool_get_all_props() has most likely failed because
         * the pool is faulted, but if all we need is the top level
         * vdev's guid then get it from the zhp config nvlist.
         */
        if ((prop == ZPOOL_PROP_GUID) &&
            (nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
            (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
            == 0)) {
            return (value);
        }
        return (zpool_prop_default_numeric(prop));
    }

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
        source = value;
        verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
    } else {
        source = ZPROP_SRC_DEFAULT;
        value = zpool_prop_default_numeric(prop);
    }

    if (src)
        *src = source;

    return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
    switch (state) {
    case VDEV_STATE_CLOSED:
    case VDEV_STATE_OFFLINE:
        return (gettext("OFFLINE"));
    case VDEV_STATE_REMOVED:
        return (gettext("REMOVED"));
    case VDEV_STATE_CANT_OPEN:
        if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
            return (gettext("FAULTED"));
        else if (aux == VDEV_AUX_SPLIT_POOL)
            return (gettext("SPLIT"));
        else
            return (gettext("UNAVAIL"));
    case VDEV_STATE_FAULTED:
        return (gettext("FAULTED"));
    case VDEV_STATE_DEGRADED:
        return (gettext("DEGRADED"));
    case VDEV_STATE_HEALTHY:
        return (gettext("ONLINE"));
    default:
        break;
    }

    return (gettext("UNKNOWN"));
}
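
/*
 * Illustrative usage sketch (not part of this file): mapping a vdev
 * state to its display string.  VDEV_AUX_NONE means "no auxiliary
 * failure state".  This prints "ONLINE" for a healthy vdev:
 *
 *      (void) printf("%s\n",
 *          zpool_state_to_name(VDEV_STATE_HEALTHY, VDEV_AUX_NONE));
 */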
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
{
    uint64_t intval;
    const char *strval;
    zprop_source_t src = ZPROP_SRC_NONE;
    nvlist_t *nvroot;
    vdev_stat_t *vs;
    uint_t vsc;

    if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
        switch (prop) {
        case ZPOOL_PROP_NAME:
            (void) strlcpy(buf, zpool_get_name(zhp), len);
            break;

        case ZPOOL_PROP_HEALTH:
            (void) strlcpy(buf, "FAULTED", len);
            break;

        case ZPOOL_PROP_GUID:
            intval = zpool_get_prop_int(zhp, prop, &src);
            (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
            break;

        case ZPOOL_PROP_ALTROOT:
        case ZPOOL_PROP_CACHEFILE:
            if (zhp->zpool_props != NULL ||
                zpool_get_all_props(zhp) == 0) {
                (void) strlcpy(buf,
                    zpool_get_prop_string(zhp, prop, &src),
                    len);
                break;
            }
            /* FALLTHROUGH */
        default:
            (void) strlcpy(buf, "-", len);
            break;
        }

        if (srctype != NULL)
            *srctype = src;
        return (0);
    }

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
        prop != ZPOOL_PROP_NAME)
        return (-1);

    switch (zpool_prop_get_type(prop)) {
    case PROP_TYPE_STRING:
        (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
            len);
        break;

    case PROP_TYPE_NUMBER:
        intval = zpool_get_prop_int(zhp, prop, &src);

        switch (prop) {
        case ZPOOL_PROP_SIZE:
        case ZPOOL_PROP_ALLOCATED:
        case ZPOOL_PROP_FREE:
        case ZPOOL_PROP_ASHIFT:
            (void) zfs_nicenum(intval, buf, len);
            break;

        case ZPOOL_PROP_CAPACITY:
            (void) snprintf(buf, len, "%llu%%",
                (u_longlong_t)intval);
            break;

        case ZPOOL_PROP_DEDUPRATIO:
            (void) snprintf(buf, len, "%llu.%02llux",
                (u_longlong_t)(intval / 100),
                (u_longlong_t)(intval % 100));
            break;

        case ZPOOL_PROP_HEALTH:
            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
            verify(nvlist_lookup_uint64_array(nvroot,
                ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
                == 0);

            (void) strlcpy(buf, zpool_state_to_name(intval,
                vs->vs_aux), len);
            break;

        default:
            (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
        }
        break;

    case PROP_TYPE_INDEX:
        intval = zpool_get_prop_int(zhp, prop, &src);
        if (zpool_prop_index_to_string(prop, intval, &strval)
            != 0)
            return (-1);
        (void) strlcpy(buf, strval, len);
        break;

    default:
        abort();
    }

    if (srctype)
        *srctype = src;

    return (0);
}
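
/*
 * Illustrative usage sketch (not part of this file): reading a pool
 * property into a caller-supplied buffer.  Assumes an initialized
 * libzfs handle and an imported pool named "tank"; error handling is
 * abbreviated:
 *
 *      libzfs_handle_t *hdl = libzfs_init();
 *      zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *      char buf[ZPOOL_MAXPROPLEN];
 *      zprop_source_t src;
 *
 *      if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *          sizeof (buf), &src) == 0)
 *              (void) printf("health: %s\n", buf);
 *      zpool_close(zhp);
 *      libzfs_fini(hdl);
 */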
/*
 * Check that the bootfs name is within the pool it is being set on.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
    int len = strlen(pool);

    if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
        return (B_FALSE);

    if (strncmp(pool, bootfs, len) == 0 &&
        (bootfs[len] == '/' || bootfs[len] == '\0'))
        return (B_TRUE);

    return (B_FALSE);
}
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
    nvlist_t **child;
    uint_t c, children;

    if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return (read_efi_label(config, NULL) >= 0);

    for (c = 0; c < children; c++) {
        if (pool_uses_efi(child[c]))
            return (B_TRUE);
    }
    return (B_FALSE);
}
static boolean_t
pool_is_bootable(zpool_handle_t *zhp)
{
    char bootfs[ZPOOL_MAXNAMELEN];

    return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
        sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
        sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
    nvpair_t *elem = NULL;
    nvlist_t *retprops;
    zpool_prop_t prop;
    char *strval;
    uint64_t intval;
    char *slash;
    struct stat64 statbuf;
    zpool_handle_t *zhp;
    nvlist_t *nvroot;

    if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
        (void) no_memory(hdl);
        return (NULL);
    }

    while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
        const char *propname = nvpair_name(elem);

        /*
         * Make sure this property is valid and applies to this type.
         */
        if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "invalid property '%s'"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
            goto error;
        }

        if (zpool_prop_readonly(prop)) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                "is readonly"), propname);
            (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
            goto error;
        }

        if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
            &strval, &intval, errbuf) != 0)
            goto error;

        /*
         * Perform additional checking for specific properties.
         */
        switch (prop) {
        case ZPOOL_PROP_VERSION:
            if (intval < version || intval > SPA_VERSION) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' number %d is invalid."),
                    propname, intval);
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                goto error;
            }
            break;

        case ZPOOL_PROP_ASHIFT:
            if (!flags.create) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set at "
                    "creation time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (intval != 0 && (intval < 9 || intval > 13)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' number %d is invalid."),
                    propname, intval);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            break;

        case ZPOOL_PROP_BOOTFS:
            if (flags.create || flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' cannot be set at creation "
                    "or import time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            if (version < SPA_VERSION_BOOTFS) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "pool must be upgraded to support "
                    "'%s' property"), propname);
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
                goto error;
            }

            /*
             * The bootfs property value must be a dataset name,
             * and the dataset must reside in the pool whose
             * bootfs property is being set.
             */
            if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                strval)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                    "is an invalid name"), strval);
                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
                goto error;
            }

            if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "could not open pool '%s'"), poolname);
                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
                goto error;
            }
            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#if defined(__sun__) || defined(__sun)
            /*
             * The bootfs property cannot be set on a disk which
             * has been EFI labeled.
             */
            if (pool_uses_efi(nvroot)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' not supported on "
                    "EFI labeled devices"), propname);
                (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
                zpool_close(zhp);
                goto error;
            }
#endif
            zpool_close(zhp);
            break;
        case ZPOOL_PROP_ALTROOT:
            if (!flags.create && !flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set during pool "
                    "creation or import"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "bad alternate root '%s'"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }
            break;

        case ZPOOL_PROP_CACHEFILE:
            if (strval[0] == '\0')
                break;

            if (strcmp(strval, "none") == 0)
                break;

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' must be empty, an "
                    "absolute path, or 'none'"), propname);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }

            slash = strrchr(strval, '/');

            if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                strcmp(slash, "/..") == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid file"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }

            *slash = '\0';

            if (strval[0] != '\0' &&
                (stat64(strval, &statbuf) != 0 ||
                !S_ISDIR(statbuf.st_mode))) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid directory"),
                    strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
                goto error;
            }

            *slash = '/';
            break;

        case ZPOOL_PROP_READONLY:
            if (!flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set at "
                    "import time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
                goto error;
            }
            break;
        }
    }

    return (retprops);
error:
    nvlist_free(retprops);
    return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    int ret = -1;
    char errbuf[1024];
    nvlist_t *nvl = NULL;
    nvlist_t *realprops;
    uint64_t version;
    prop_flags_t flags = { 0 };

    (void) snprintf(errbuf, sizeof (errbuf),
        dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
        zhp->zpool_name);

    if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
        return (no_memory(zhp->zpool_hdl));

    if (nvlist_add_string(nvl, propname, propval) != 0) {
        nvlist_free(nvl);
        return (no_memory(zhp->zpool_hdl));
    }

    version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
    if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
        zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
        nvlist_free(nvl);
        return (-1);
    }

    nvlist_free(nvl);
    nvl = realprops;

    /*
     * Execute the corresponding ioctl() to set this property.
     */
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
        nvlist_free(nvl);
        return (-1);
    }

    ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

    zcmd_free_nvlists(&zc);
    nvlist_free(nvl);

    if (ret)
        (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
    else
        (void) zpool_props_refresh(zhp);

    return (ret);
}
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    zprop_list_t *entry;
    char buf[ZFS_MAXPROPLEN];

    if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
        return (-1);

    for (entry = *plp; entry != NULL; entry = entry->pl_next) {

        if (entry->pl_fixed)
            continue;

        if (entry->pl_prop != ZPROP_INVAL &&
            zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
            NULL) == 0) {
            if (strlen(buf) > entry->pl_width)
                entry->pl_width = strlen(buf);
        }
    }

    return (0);
}
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, other vendors prefer a 1m
 * alignment.  It is best to play it safe and ensure a 1m alignment
 * given 512B blocks.  When the block size is larger by a power of 2
 * we will still be 1m aligned.  Some devices are sensitive to the
 * partition ending alignment as well.
 */
#define	NEW_START_BLOCK		2048
#define	PARTITION_END_ALIGNMENT	2048
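
/*
 * Worked example of the alignment reasoning above: with 512-byte
 * sectors, NEW_START_BLOCK puts the first partition at
 * 2048 * 512 B = 1 MiB.  On a 4 KiB-sector device the same block count
 * yields 2048 * 4 KiB = 8 MiB, which is still a multiple of 1 MiB, so
 * the guarantee holds when the block size grows by a power of 2.
 */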
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
    namecheck_err_t why;
    char what;
    int ret;

    ret = pool_namecheck(pool, &why, &what);

    /*
     * The rules for reserved pool names were extended at a later point.
     * But we need to support users with existing pools that may now be
     * invalid.  So we only check for this expanded set of names during a
     * create (or import), and only in userland.
     */
    if (ret == 0 && !isopen &&
        (strncmp(pool, "mirror", 6) == 0 ||
        strncmp(pool, "raidz", 5) == 0 ||
        strncmp(pool, "spare", 5) == 0 ||
        strcmp(pool, "log") == 0)) {
        if (hdl != NULL)
            zfs_error_aux(hdl,
                dgettext(TEXT_DOMAIN, "name is reserved"));
        return (B_FALSE);
    }

    if (ret != 0) {
        if (hdl != NULL) {
            switch (why) {
            case NAME_ERR_TOOLONG:
                zfs_error_aux(hdl,
                    dgettext(TEXT_DOMAIN, "name is too long"));
                break;

            case NAME_ERR_INVALCHAR:
                zfs_error_aux(hdl,
                    dgettext(TEXT_DOMAIN, "invalid character "
                    "'%c' in pool name"), what);
                break;

            case NAME_ERR_NOLETTER:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "name must begin with a letter"));
                break;

            case NAME_ERR_RESERVED:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "name is reserved"));
                break;

            case NAME_ERR_DISKLIKE:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "pool name is reserved"));
                break;

            case NAME_ERR_LEADING_SLASH:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "leading slash in name"));
                break;

            case NAME_ERR_EMPTY_COMPONENT:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "empty component in name"));
                break;

            case NAME_ERR_TRAILING_SLASH:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "trailing slash in name"));
                break;

            case NAME_ERR_MULTIPLE_AT:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "multiple '@' delimiters in name"));
                break;

            case NAME_ERR_NO_AT:
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "permission set is missing '@'"));
                break;
            }
        }
        return (B_FALSE);
    }

    return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
    zpool_handle_t *zhp;
    boolean_t missing;

    /*
     * Make sure the pool name is valid.
     */
    if (!zpool_name_valid(hdl, B_TRUE, pool)) {
        (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
            pool);
        return (NULL);
    }

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
        return (NULL);

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zpool_close(zhp);
        return (NULL);
    }

    if (missing) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
        (void) zfs_error_fmt(hdl, EZFS_NOENT,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
        zpool_close(zhp);
        return (NULL);
    }

    return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
    zpool_handle_t *zhp;
    boolean_t missing;

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
        return (-1);

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zpool_close(zhp);
        return (-1);
    }

    if (missing) {
        zpool_close(zhp);
        *ret = NULL;
        return (0);
    }

    *ret = zhp;
    return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
    zpool_handle_t *zhp;

    if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
        return (NULL);

    if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
        (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
        zpool_close(zhp);
        return (NULL);
    }

    return (zhp);
}
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
    if (zhp->zpool_config)
        nvlist_free(zhp->zpool_config);
    if (zhp->zpool_old_config)
        nvlist_free(zhp->zpool_old_config);
    if (zhp->zpool_props)
        nvlist_free(zhp->zpool_props);
    free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
    return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
    return (zhp->zpool_state);
}
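
/*
 * Illustrative lifecycle sketch for the handle functions above (assumes
 * an initialized libzfs handle 'hdl' and an imported pool named "tank";
 * error handling abbreviated):
 *
 *      zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *      if (zhp != NULL) {
 *              if (zpool_get_state(zhp) == POOL_STATE_ACTIVE)
 *                      (void) printf("%s is active\n",
 *                          zpool_get_name(zhp));
 *              zpool_close(zhp);
 *      }
 */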
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    nvlist_t *zc_fsprops = NULL;
    nvlist_t *zc_props = NULL;
    char msg[1024];
    char *altroot;
    char *zonestr;
    int ret = -1;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot create '%s'"), pool);

    if (!zpool_name_valid(hdl, B_FALSE, pool))
        return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
        return (-1);

    if (props) {
        prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

        if ((zc_props = zpool_valid_proplist(hdl, pool, props,
            SPA_VERSION_1, flags, msg)) == NULL) {
            goto create_failed;
        }
    }

    if (fsprops) {
        uint64_t zoned;

        zoned = ((nvlist_lookup_string(fsprops,
            zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
            strcmp(zonestr, "on") == 0);

        if ((zc_fsprops = zfs_valid_proplist(hdl,
            ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
            goto create_failed;
        }
        if (!zc_props &&
            (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
            goto create_failed;
        }
        if (nvlist_add_nvlist(zc_props,
            ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
            goto create_failed;
        }
    }

    if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
        goto create_failed;

    (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

    if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);

        switch (errno) {
        case EBUSY:
            /*
             * This can happen if the user has specified the same
             * device multiple times.  We can't reliably detect this
             * until we try to add it and see we already have a
             * label.  This can also happen if the device is
             * part of an active md or lvm device.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device, or one of\n"
                "the devices is part of an active md or lvm device"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));
        case EOVERFLOW:
            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            {
                char buf[64];

                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "one or more devices is less than the "
                    "minimum size (%s)"), buf);
            }
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case ENOSPC:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is out of space"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        case ENOTBLK:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

        default:
            return (zpool_standard_error(hdl, errno, msg));
        }
    }

    /*
     * If this is an alternate root pool, then we automatically set the
     * mountpoint of the root dataset to be '/'.
     */
    if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
        &altroot) == 0) {
        zfs_handle_t *zhp;

        verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
        verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
            "/") == 0);

        zfs_close(zhp);
    }

create_failed:
    zcmd_free_nvlists(&zc);
    nvlist_free(zc_props);
    nvlist_free(zc_fsprops);
    return (ret);
}
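
/*
 * Illustrative sketch of a zpool_create() caller (not part of this
 * file).  The vdev tree below is hand-built for brevity; the zpool(8)
 * command constructs it (and labels whole disks) before calling this
 * function, so treat these nvlist keys as the minimum, not the full
 * set.  "/dev/sdb" is a placeholder device path:
 *
 *      nvlist_t *nvroot, *disk;
 *
 *      verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *      verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *          VDEV_TYPE_DISK) == 0);
 *      verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *          "/dev/sdb") == 0);
 *
 *      verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *      verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *          VDEV_TYPE_ROOT) == 0);
 *      verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *          &disk, 1) == 0);
 *
 *      if (zpool_create(hdl, "tank", nvroot, NULL, NULL) == 0)
 *              (void) printf("created tank\n");
 *      nvlist_free(disk);
 *      nvlist_free(nvroot);
 */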
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    zfs_handle_t *zfp = NULL;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char msg[1024];

    if (zhp->zpool_state == POOL_STATE_ACTIVE &&
        (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
        return (-1);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot destroy '%s'"), zhp->zpool_name);

        if (errno == EROFS) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
        } else {
            (void) zpool_standard_error(hdl, errno, msg);
        }

        if (zfp)
            zfs_close(zfp);
        return (-1);
    }

    if (zfp) {
        remove_mountpoint(zfp);
        zfs_close(zfp);
    }

    return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    int ret;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char msg[1024];
    nvlist_t **spares, **l2cache;
    uint_t nspares, nl2cache;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot add to '%s'"), zhp->zpool_name);

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_SPARES &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add hot spares"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));
    }

    if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
        ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
        uint_t s;

        for (s = 0; s < nspares; s++) {
            char *path;

            if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
                &path) == 0 && pool_uses_efi(spares[s])) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "device '%s' contains an EFI label and "
                    "cannot be used on root pools."),
                    zpool_vdev_name(hdl, NULL, spares[s],
                    B_FALSE));
                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
            }
        }
    }

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_L2CACHE &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
        &l2cache, &nl2cache) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add cache devices"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));
    }

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
        return (-1);
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
        switch (errno) {
        case EBUSY:
            /*
             * This can happen if the user has specified the same
             * device multiple times.  We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;
        case EOVERFLOW:
            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            {
                char buf[64];

                zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "device is less than the minimum "
                    "size (%s)"), buf);
            }
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;
        case ENOTSUP:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool must be upgraded to add these vdevs"));
            (void) zfs_error(hdl, EZFS_BADVERSION, msg);
            break;

        case EDOM:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "root pool can not have multiple vdevs"
                " or separate logs"));
            (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
            break;

        case ENOTBLK:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            break;

        default:
            (void) zpool_standard_error(hdl, errno, msg);
        }

        ret = -1;
    } else {
        ret = 0;
    }

    zcmd_free_nvlists(&zc);

    return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot export '%s'"), zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = force;
    zc.zc_guid = hardforce;

    if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
        switch (errno) {
        case EXDEV:
            zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                "use '-f' to override the following errors:\n"
                "'%s' has an active shared spare which could be"
                " used by other pools once '%s' is exported."),
                zhp->zpool_name, zhp->zpool_name);
            return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
                msg));
        default:
            return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
                msg));
        }
    }

    return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force)
{
    return (zpool_export_common(zhp, force, B_FALSE));
}

int
zpool_export_force(zpool_handle_t *zhp)
{
    return (zpool_export_common(zhp, B_TRUE, B_TRUE));
}
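
/*
 * Illustrative sketch (not part of this file): a normal export first,
 * falling back to the hard-forced variant only when the caller is sure
 * no datasets in the pool are still in use:
 *
 *      if (zpool_export(zhp, B_FALSE) != 0)
 *              (void) zpool_export_force(zhp);
 */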
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
    nvlist_t *nv = NULL;
    int64_t loss = -1;
    uint64_t rewindto;
    struct tm t;
    char timestr[128];

    if (!hdl->libzfs_printerr || config == NULL)
        return;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)
        return;

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
        return;
    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, "%c", &t) != 0) {
        if (dryrun) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "Would be able to return %s "
                "to its state as of %s.\n"),
                name, timestr);
        } else {
            (void) printf(dgettext(TEXT_DOMAIN,
                "Pool %s returned to its state as of %s.\n"),
                name, timestr);
        }
        if (loss > 120) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded",
                ((longlong_t)loss + 30) / 60);
            (void) printf(dgettext(TEXT_DOMAIN,
                "minutes of transactions.\n"));
        } else if (loss > 0) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded",
                (longlong_t)loss);
            (void) printf(dgettext(TEXT_DOMAIN,
                "seconds of transactions.\n"));
        }
    }
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
    nvlist_t *nv = NULL;
    int64_t loss = -1;
    uint64_t edata = UINT64_MAX;
    uint64_t rewindto;
    struct tm t;
    char timestr[128];

    if (!hdl->libzfs_printerr)
        return;

    if (reason >= 0)
        (void) printf(dgettext(TEXT_DOMAIN, "action: "));
    else
        (void) printf(dgettext(TEXT_DOMAIN, "\t"));

    /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
        goto no_info;

    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
        &edata);

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery is possible, but will result in some data loss.\n"));

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, "%c", &t) != 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReturning the pool to its state as of %s\n"
            "\tshould correct the problem.  "),
            timestr);
    } else {
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReverting the pool to an earlier state "
            "should correct the problem.\n\t"));
    }

    if (loss > 120) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld minutes of data\n"
            "\tmust be discarded, irreversibly.  "),
            ((longlong_t)loss + 30) / 60);
    } else if (loss > 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld seconds of data\n"
            "\tmust be discarded, irreversibly.  "),
            (longlong_t)loss);
    }

    if (edata != 0 && edata != UINT64_MAX) {
        if (edata == 1) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, at least\n"
                "\tone persistent user-data error will remain.  "));
        } else {
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, several\n"
                "\tpersistent user-data errors will remain.  "));
        }
    }

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
        reason >= 0 ? "clear" : "import", name);

    (void) printf(dgettext(TEXT_DOMAIN,
        "A scrub of the pool\n"
        "\tis strongly recommended after recovery.\n"));
    return;

no_info:
    (void) printf(dgettext(TEXT_DOMAIN,
        "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface.  Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
    nvlist_t *props = NULL;
    int ret;

    if (altroot != NULL) {
        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        }

        if (nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
            nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
            nvlist_free(props);
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        }
    }

    ret = zpool_import_props(hdl, config, newname, props,
        ZFS_IMPORT_NORMAL);
    nvlist_free(props);
    return (ret);
}
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
    nvlist_t **child;
    uint_t c, children;
    char *vname;
    uint64_t is_log = 0;

    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
        &is_log);

    if (name != NULL)
        (void) printf("\t%*s%s%s\n", indent, "", name,
            is_log ? " [log]" : "");

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return;

    for (c = 0; c < children; c++) {
        vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
        print_vdev_tree(hdl, vname, child[c], indent + 2);
        free(vname);
    }
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    zpool_rewind_policy_t policy;
    nvlist_t *nv = NULL;
    nvlist_t *nvinfo = NULL;
    nvlist_t *missing = NULL;
    char *thename;
    char *origname;
    int ret;
    int error = 0;
    char errbuf[1024];

    verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
        &origname) == 0);

    (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
        "cannot import pool '%s'"), origname);

    if (newname != NULL) {
        if (!zpool_name_valid(hdl, B_FALSE, newname))
            return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                newname));
        thename = (char *)newname;
    } else {
        thename = origname;
    }

    if (props) {
        uint64_t version;
        prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
            &version) == 0);

        if ((props = zpool_valid_proplist(hdl, origname,
            props, version, flags, errbuf)) == NULL) {
            return (-1);
        } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
            nvlist_free(props);
            return (-1);
        }
    }

    (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
        &zc.zc_guid) == 0);

    if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
        nvlist_free(props);
        return (-1);
    }
    if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
        nvlist_free(props);
        return (-1);
    }

    zc.zc_cookie = flags;
    while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
        errno == ENOMEM) {
        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
            zcmd_free_nvlists(&zc);
            return (-1);
        }
    }
    if (ret != 0)
        error = errno;

    (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
    zpool_get_rewind_policy(config, &policy);

    if (error) {
        char desc[1024];

        /*
         * Dry-run failed, but we print out what success
         * looks like if we found a best txg.
         */
        if (policy.zrp_request & ZPOOL_TRY_REWIND) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,
                B_TRUE, nv);
            zcmd_free_nvlists(&zc);
            return (-1);
        }

        if (newname == NULL)
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
                thename);
        else
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
                origname, thename);

        switch (error) {
        case ENOTSUP:
            /*
             * Unsupported version.
             */
            (void) zfs_error(hdl, EZFS_BADVERSION, desc);
            break;

        case EINVAL:
            (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
            break;

        case EROFS:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, desc);
            break;

        case ENXIO:
            if (nv && nvlist_lookup_nvlist(nv,
                ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                nvlist_lookup_nvlist(nvinfo,
                ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "The devices below are missing, use "
                    "'-m' to import the pool anyway:\n"));
                print_vdev_tree(hdl, NULL, missing, 2);
                (void) printf("\n");
            }
            (void) zpool_standard_error(hdl, error, desc);
            break;

        case EEXIST:
            (void) zpool_standard_error(hdl, error, desc);
            break;

        case EBUSY:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices are already in use\n"));
            (void) zfs_error(hdl, EZFS_BADDEV, desc);
            break;

        default:
            (void) zpool_standard_error(hdl, error, desc);
            zpool_explain_recover(hdl,
                newname ? origname : thename, -error, nv);
            break;
        }

        nvlist_free(nv);
        ret = -1;
    } else {
        zpool_handle_t *zhp;

        /*
         * This should never fail, but play it safe anyway.
         */
        if (zpool_open_silent(hdl, thename, &zhp) != 0)
            ret = -1;
        else if (zhp != NULL)
            zpool_close(zhp);
        if (policy.zrp_request &
            (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,
                ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
        }
        nvlist_free(nv);
        ret = 0;
    }

    zcmd_free_nvlists(&zc);
    nvlist_free(props);

    return (ret);
}
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = func;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
        (errno == ENOENT && func != POOL_SCAN_NONE))
        return (0);

    if (func == POOL_SCAN_SCRUB) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
    } else if (func == POOL_SCAN_NONE) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
            zc.zc_name);
    } else {
        assert(!"unexpected result");
    }

    if (errno == EBUSY) {
        nvlist_t *nvroot;
        pool_scan_stat_t *ps = NULL;
        uint_t psc;

        verify(nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
        if (ps && ps->pss_func == POOL_SCAN_SCRUB)
            return (zfs_error(hdl, EZFS_SCRUBBING, msg));
        else
            return (zfs_error(hdl, EZFS_RESILVERING, msg));
    } else if (errno == ENOENT) {
        return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
    } else {
        return (zpool_standard_error(hdl, errno, msg));
    }
}
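
/*
 * Illustrative sketch (not part of this file): starting a scrub and
 * later cancelling it.  POOL_SCAN_NONE stops an in-progress scan:
 *
 *      if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *              (void) fprintf(stderr, "scrub failed to start\n");
 *
 *      ...
 *
 *      (void) zpool_scan(zhp, POOL_SCAN_NONE);
 */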
/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
    uint_t c, children;
    nvlist_t **child;
    nvlist_t *ret;
    uint64_t is_log;
    char *srchkey;
    nvpair_t *pair = nvlist_next_nvpair(search, NULL);

    /* Nothing to look for */
    if (search == NULL || pair == NULL)
        return (NULL);

    /* Obtain the key we will use to search */
    srchkey = nvpair_name(pair);

    switch (nvpair_type(pair)) {
    case DATA_TYPE_UINT64:
        if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
            uint64_t srchval, theguid;

            verify(nvpair_value_uint64(pair, &srchval) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
                &theguid) == 0);
            if (theguid == srchval)
                return (nv);
        }
        break;

    case DATA_TYPE_STRING: {
        char *srchval, *val;

        verify(nvpair_value_string(pair, &srchval) == 0);
        if (nvlist_lookup_string(nv, srchkey, &val) != 0)
            break;
        /*
         * Search for the requested value.  Special cases:
         *
         * - ZPOOL_CONFIG_PATH for whole disk entries.  These end with a
         *   partition suffix "1", "-part1", or "p1".  The suffix is hidden
         *   from the user, but included in the string, so this matches around
         *   it.
         * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
         *
         * Otherwise, all other searches are simple string compares.
         */
        if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
            uint64_t wholedisk = 0;

            (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                &wholedisk);
            if (wholedisk) {
                char buf[MAXPATHLEN];

                zfs_append_partition(srchval, buf, sizeof (buf));
                if (strcmp(val, buf) == 0)
                    return (nv);
            }
        } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
            char *type, *idx, *end, *p;
            uint64_t id, vdev_id;

            /*
             * Determine our vdev type, keeping in mind
             * that the srchval is composed of a type and
             * vdev id pair (i.e. mirror-4).
             */
            if ((type = strdup(srchval)) == NULL)
                return (NULL);

            if ((p = strrchr(type, '-')) == NULL) {
                free(type);
                break;
            }
            idx = p + 1;
            *p = '\0';

            /*
             * If the types don't match then keep looking.
             */
            if (strncmp(val, type, strlen(val)) != 0) {
                free(type);
                break;
            }

            verify(strncmp(type, VDEV_TYPE_RAIDZ,
                strlen(VDEV_TYPE_RAIDZ)) == 0 ||
                strncmp(type, VDEV_TYPE_MIRROR,
                strlen(VDEV_TYPE_MIRROR)) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
                &id) == 0);

            errno = 0;
            vdev_id = strtoull(idx, &end, 10);

            free(type);
            if (errno != 0)
                return (NULL);

            /*
             * Now verify that we have the correct vdev id.
             */
            if (vdev_id == id)
                return (nv);
        }

        if (strcmp(srchval, val) == 0)
            return (nv);
        break;
    }

    default:
        break;
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return (NULL);

    for (c = 0; c < children; c++) {
        if ((ret = vdev_to_nvlist_iter(child[c], search,
            avail_spare, l2cache, NULL)) != NULL) {
            /*
             * The 'is_log' value is only set for the toplevel
             * vdev, not the leaf vdevs.  So we always lookup the
             * log device from the root of the vdev tree (where
             * 'log' is non-NULL).
             */
            if (log != NULL &&
                nvlist_lookup_uint64(child[c],
                ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
                is_log)
                *log = B_TRUE;
            return (ret);
        }
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
                *avail_spare = B_TRUE;
                return (ret);
            }
        }
    }

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
                *l2cache = B_TRUE;
                return (ret);
            }
        }
    }

    return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
    nvlist_t *search, *nvroot, *ret;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
    verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
        &nvroot) == 0);

    *avail_spare = B_FALSE;
    *l2cache = B_FALSE;
    if (log != NULL)
        *log = B_FALSE;
    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);

    return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
    if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
        strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
        return (B_TRUE);
    return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
    char buf[MAXPATHLEN];
    char *end;
    nvlist_t *nvroot, *search, *ret;
    uint64_t guid;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

    guid = strtoull(path, &end, 10);
    if (guid != 0 && *end == '\0') {
        verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
    } else if (zpool_vdev_is_interior(path)) {
        verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
    } else if (path[0] != '/') {
        if (zfs_resolve_shortname(path, buf, sizeof (buf)) < 0) {
            nvlist_free(search);
            return (NULL);
        }
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
    } else {
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
    }

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
        &nvroot) == 0);

    *avail_spare = B_FALSE;
    *l2cache = B_FALSE;
    if (log != NULL)
        *log = B_FALSE;
    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);

    return (ret);
}
static int
vdev_online(nvlist_t *nv)
{
    uint64_t ival;

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
        return (0);

    return (1);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
    size_t bytes_left, pos, rsz;
    char *tmppath;
    const char *format;

    if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
        &tmppath) != 0)
        return (EZFS_NODEVICE);

    pos = *bytes_written;
    bytes_left = physpath_size - pos;
    format = (pos == 0) ? "%s" : " %s";

    rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
    *bytes_written += rsz;

    if (rsz >= bytes_left) {
        /* if physpath was not copied properly, clear it */
        if (bytes_left != 0) {
            physpath[pos] = 0;
        }
        return (EZFS_NOSPC);
    }
    return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
    char *type;
    int ret;

    if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
        return (EZFS_INVALCONFIG);

    if (strcmp(type, VDEV_TYPE_DISK) == 0) {
        /*
         * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
         * For a spare vdev, we only want to boot from the active
         * spare device.
         */
        if (is_spare) {
            uint64_t spare = 0;

            (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
                &spare);
            if (!spare)
                return (EZFS_INVALCONFIG);
        }

        if (vdev_online(nv)) {
            if ((ret = vdev_get_one_physpath(nv, physpath,
                phypath_size, rsz)) != 0)
                return (ret);
        }
    } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
        strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
        (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
        nvlist_t **child;
        uint_t count;
        int i;

        if (nvlist_lookup_nvlist_array(nv,
            ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
            return (EZFS_INVALCONFIG);

        for (i = 0; i < count; i++) {
            ret = vdev_get_physpaths(child[i], physpath,
                phypath_size, rsz, is_spare);
            if (ret == EZFS_NOSPC)
                return (ret);
        }
    } else {
        return (EZFS_POOL_INVALARG);
    }

    return (0);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
    size_t rsz;
    nvlist_t *vdev_root;
    nvlist_t **child;
    uint_t count;
    char *type;

    rsz = 0;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
        &vdev_root) != 0)
        return (EZFS_INVALCONFIG);

    if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
        nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
        &child, &count) != 0)
        return (EZFS_INVALCONFIG);
    /*
     * root pool cannot have EFI labeled disks and can only have
     * a single top-level vdev.
     */
    if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
        pool_uses_efi(vdev_root))
        return (EZFS_POOL_INVALARG);

    (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
        B_FALSE);

    /* No online devices */
    if (rsz == 0)
        return (EZFS_NODEVICE);

    return (0);
}
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
    return (zpool_get_config_physpath(zhp->zpool_config, physpath,
        phypath_size));
}
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
    int fd, error;

    if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to open device: %d"), path, errno);
        return (zfs_error(hdl, EZFS_OPENFAILED, msg));
    }

    /*
     * It's possible that we might encounter an error if the device
     * does not have any unallocated space left.  If so, we simply
     * ignore that error and continue on.
     */
    error = efi_use_whole_disk(fd);
    (void) close(fd);
    if (error && error != VT_ENOSPC) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to read disk capacity"), path);
        return (zfs_error(hdl, EZFS_NOCAP, msg));
    }
    return (0);
}
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache, islog;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    int error;

    if (flags & ZFS_ONLINE_EXPAND) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
    } else {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot online %s"), path);
    }

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        &islog)) == NULL)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (flags & ZFS_ONLINE_EXPAND ||
        zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
        uint64_t wholedisk = 0;

        (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
            &wholedisk);

        /*
         * XXX - L2ARC 1.0 devices can't support expansion.
         */
        if (l2cache) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cannot expand cache devices"));
            return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
        }

        if (wholedisk) {
            const char *fullpath = path;
            char buf[MAXPATHLEN];

            if (path[0] != '/') {
                error = zfs_resolve_shortname(path, buf,
                    sizeof (buf));
                if (error != 0)
                    return (zfs_error(hdl, EZFS_NODEVICE,
                        msg));

                fullpath = buf;
            }

            error = zpool_relabel_disk(hdl, fullpath, msg);
            if (error != 0)
                return (error);
        }
    }

    zc.zc_cookie = VDEV_STATE_ONLINE;
    zc.zc_obj = flags;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
        if (errno == EINVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
                "from this pool into a new one.  Use '%s' "
                "instead"), "zpool detach");
            return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
        }
        return (zpool_standard_error(hdl, errno, msg));
    }

    *newstate = zc.zc_cookie;
    return (0);
}
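
/*
 * Illustrative sketch (not part of this file): onlining a previously
 * offlined vdev and reporting the state it came back in.  "sda" is a
 * placeholder device name:
 *
 *      vdev_state_t newstate;
 *
 *      if (zpool_vdev_online(zhp, "sda", 0, &newstate) == 0 &&
 *          newstate != VDEV_STATE_HEALTHY)
 *              (void) printf("device is %s\n",
 *                  zpool_state_to_name(newstate, VDEV_AUX_NONE));
 */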
/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        NULL)) == NULL)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    zc.zc_cookie = VDEV_STATE_OFFLINE;
    zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
        return (0);

    switch (errno) {
    case EBUSY:
        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

    case EEXIST:
        /*
         * The log device has unplayed logs.
         */
        return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

    default:
        return (zpool_standard_error(hdl, errno, msg));
    }
}
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_guid = guid;
    zc.zc_cookie = VDEV_STATE_FAULTED;
    zc.zc_obj = aux;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
        return (0);

    switch (errno) {
    case EBUSY:
        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

    default:
        return (zpool_standard_error(hdl, errno, msg));
    }
}
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_guid = guid;
    zc.zc_cookie = VDEV_STATE_DEGRADED;
    zc.zc_obj = aux;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
        return (0);

    return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
    nvlist_t **child;
    uint_t c, children;
    char *type;

    if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
        &children) == 0) {
        verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
            &type) == 0);

        if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
            children == 2 && child[which] == tgt)
            return (B_TRUE);

        for (c = 0; c < children; c++)
            if (is_replacing_spare(child[c], tgt, which))
                return (B_TRUE);
    }

    return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    int ret;
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache, islog;
    uint64_t val;
    char *newname;
    nvlist_t **child;
    uint_t children;
    nvlist_t *config_root;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    boolean_t rootpool = pool_is_bootable(zhp);

    if (replacing)
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot replace %s with %s"), old_disk, new_disk);
    else
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot attach %s to %s"), new_disk, old_disk);

    /*
     * If this is a root pool, make sure that we're not attaching an
     * EFI labeled device.
     */
    if (rootpool && pool_uses_efi(nvroot)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "EFI labeled devices are not supported on root pools."));
        return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
    }

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
        &islog)) == 0)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (l2cache)
        return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
    zc.zc_cookie = replacing;

    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0 || children != 1) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "new device must be a single disk"));
        return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
    }

    verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
        ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

    if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
        return (-1);

    /*
     * If the target is a hot spare that has been swapped in, we can only
     * replace it with another hot spare.
     */
    if (replacing &&
        nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
        (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
        NULL) == NULL || !avail_spare) &&
        is_replacing_spare(config_root, tgt, 1)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "can only be replaced by another hot spare"));
        free(newname);
        return (zfs_error(hdl, EZFS_BADTARGET, msg));
    }

    free(newname);

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
        return (-1);

    ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

    zcmd_free_nvlists(&zc);

    if (ret == 0) {
        if (rootpool) {
            /*
             * XXX need a better way to prevent user from
             * booting up a half-baked vdev.
             */
            (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
                "sure to wait until resilver is done "
                "before rebooting.\n"));
        }
        return (0);
    }

    switch (errno) {
    case ENOTSUP:
        /*
         * Can't attach to or replace this type of vdev.
         */
        if (replacing) {
            uint64_t version = zpool_get_prop_int(zhp,
                ZPOOL_PROP_VERSION, NULL);

            if (islog)
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "cannot replace a log with a spare"));
            else if (version >= SPA_VERSION_MULTI_REPLACE)
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "already in replacing/spare config; wait "
                    "for completion or use 'zpool detach'"));
            else
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "cannot replace a replacing device"));
        } else {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "can only attach to mirrors and top-level "
                "disks"));
        }
        (void) zfs_error(hdl, EZFS_BADTARGET, msg);
        break;

    case EINVAL:
        /*
         * The new device must be a single disk.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "new device must be a single disk"));
        (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
        break;

    case EBUSY:
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
            new_disk);
        (void) zfs_error(hdl, EZFS_BADDEV, msg);
        break;

    case EOVERFLOW:
        /*
         * The new device is too small.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "device is too small"));
        (void) zfs_error(hdl, EZFS_BADDEV, msg);
        break;

    case EDOM:
        /*
         * The new device has a different alignment requirement.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "devices have different sector alignment"));
        (void) zfs_error(hdl, EZFS_BADDEV, msg);
        break;

    case ENAMETOOLONG:
        /*
         * The resulting top-level vdev spec won't fit in the label.
         */
        (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
        break;

    default:
        (void) zpool_standard_error(hdl, errno, msg);
    }

    return (-1);
}
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
    zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
    char msg[1024];
    nvlist_t *tgt;
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        NULL)) == 0)
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    if (avail_spare)
        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (l2cache)
        return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
        return (0);

    switch (errno) {
    case ENOTSUP:
        /*
         * Can't detach from this type of vdev.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
            "applicable to mirror and replacing vdevs"));
        (void) zfs_error(hdl, EZFS_BADTARGET, msg);
        break;

    case EBUSY:
        /*
         * There are no other replicas of this device.
         */
        (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
        break;

    default:
        (void) zpool_standard_error(hdl, errno, msg);
    }

    return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
    uint_t mc;

    for (mc = 0; mc < mchildren; mc++) {
        uint_t sc;
        char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
            mchild[mc], B_FALSE);

        for (sc = 0; sc < schildren; sc++) {
            char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
                schild[sc], B_FALSE);
            boolean_t result = (strcmp(mpath, spath) == 0);

            free(spath);
            if (result) {
                free(mpath);
                return (mc);
            }
        }

        free(mpath);
    }

    return (-1);
}
/*
 * Split a mirror pool.  If newroot points to NULL, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
2611 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2612 nvlist_t *props, splitflags_t flags)
2614 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2616 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2617 nvlist_t **varray = NULL, *zc_props = NULL;
2618 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2619 libzfs_handle_t *hdl = zhp->zpool_hdl;
2621 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2624 (void) snprintf(msg, sizeof (msg),
2625 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2627 if (!zpool_name_valid(hdl, B_FALSE, newname))
2628 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2630 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2631 (void) fprintf(stderr, gettext("Internal error: unable to "
2632 "retrieve pool configuration\n"));
2636 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2638 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2641 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2642 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2643 props, vers, flags, msg)) == NULL)
2647 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2649 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2650 "Source pool is missing vdev tree"));
2652 nvlist_free(zc_props);
2656 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2659 if (*newroot == NULL ||
2660 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2661 &newchild, &newchildren) != 0)
2664 for (c = 0; c < children; c++) {
2665 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2667 nvlist_t **mchild, *vdev;
2672 * Unlike cache & spares, slogs are stored in the
2673 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2675 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2677 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2679 if (is_log || is_hole) {
2681 * Create a hole vdev and put it in the config.
2683 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2685 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2686 VDEV_TYPE_HOLE) != 0)
2688 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2693 varray[vcount++] = vdev;
2697 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2699 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2700 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2701 "Source pool must be composed only of mirrors\n"));
2702 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2706 verify(nvlist_lookup_nvlist_array(child[c],
2707 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2709 /* find or add an entry for this top-level vdev */
2710 if (newchildren > 0 &&
2711 (entry = find_vdev_entry(zhp, mchild, mchildren,
2712 newchild, newchildren)) >= 0) {
2713 /* We found a disk that the user specified. */
2714 vdev = mchild[entry];
2717 /* User didn't specify a disk for this vdev. */
2718 vdev = mchild[mchildren - 1];
2721 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2725 /* did we find every disk the user specified? */
2726 if (found != newchildren) {
2727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2728 "include at most one disk from each mirror"));
2729 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2733 /* Prepare the nvlist for populating. */
2734 if (*newroot == NULL) {
2735 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2738 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2739 VDEV_TYPE_ROOT) != 0)
2742 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2745 /* Add all the children we found */
2746 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2747 lastlog == 0 ? vcount : lastlog) != 0)
2751 * If we're just doing a dry run, exit now with success.
2754 memory_err = B_FALSE;
2759 /* now build up the config list & call the ioctl */
2760 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2763 if (nvlist_add_nvlist(newconfig,
2764 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2765 nvlist_add_string(newconfig,
2766 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2767 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2771 * The new pool is automatically part of the namespace unless we
2772 * explicitly export it.
2775 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2776 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2777 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2778 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2780 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2783 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2784 retval = zpool_standard_error(hdl, errno, msg);
2789 memory_err = B_FALSE;
2792 if (varray != NULL) {
2795 for (v = 0; v < vcount; v++)
2796 nvlist_free(varray[v]);
2799 zcmd_free_nvlists(&zc);
2801 nvlist_free(zc_props);
2803 nvlist_free(newconfig);
2805 nvlist_free(*newroot);
2813 return (no_memory(hdl));
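/*
 * A minimal usage sketch (assumes every top-level vdev of 'zhp' is a
 * mirror; the new pool name is hypothetical): split one disk out of each
 * mirror into a new pool, with no extra properties.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	if (zpool_vdev_split(zhp, "tank2", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */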
2819 * Remove the given device. Currently, this is supported only for hot spares
2820 * and level 2 cache devices.
2823 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2825 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2828 boolean_t avail_spare, l2cache, islog;
2829 libzfs_handle_t *hdl = zhp->zpool_hdl;
2832 (void) snprintf(msg, sizeof (msg),
2833 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2835 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2836 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2838 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2840 * XXX - this should just go away.
2842 if (!avail_spare && !l2cache && !islog) {
2843 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2844 "only inactive hot spares, cache, top-level, "
2845 "or log devices can be removed"));
2846 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2849 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2850 if (islog && version < SPA_VERSION_HOLES) {
2851 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2852 "pool must be upgrade to support log removal"));
2853 return (zfs_error(hdl, EZFS_BADVERSION, msg));
2856 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2858 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2861 return (zpool_standard_error(hdl, errno, msg));
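/*
 * Usage sketch (hypothetical device name): removing an inactive hot spare
 * or cache device.
 *
 *	if (zpool_vdev_remove(zhp, "sdc") != 0)
 *		(void) fprintf(stderr, "remove failed\n");
 */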
2865 * Clear the errors for the pool, or the particular device if specified.
2868 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2870 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2873 zpool_rewind_policy_t policy;
2874 boolean_t avail_spare, l2cache;
2875 libzfs_handle_t *hdl = zhp->zpool_hdl;
2876 nvlist_t *nvi = NULL;
2880 (void) snprintf(msg, sizeof (msg),
2881 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2884 (void) snprintf(msg, sizeof (msg),
2885 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2888 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2890 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2891 &l2cache, NULL)) == NULL)
2892 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2895 * Don't allow error clearing for hot spares. Do allow
2896 * error clearing for l2cache devices.
2899 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2901 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2905 zpool_get_rewind_policy(rewindnvl, &policy);
2906 zc.zc_cookie = policy.zrp_request;
2908 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
2911 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
2914 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
2916 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2917 zcmd_free_nvlists(&zc);
2922 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2923 errno != EPERM && errno != EACCES)) {
2924 if (policy.zrp_request &
2925 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2926 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2927 zpool_rewind_exclaim(hdl, zc.zc_name,
2928 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2932 zcmd_free_nvlists(&zc);
2936 zcmd_free_nvlists(&zc);
2937 return (zpool_standard_error(hdl, errno, msg));
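/*
 * Usage sketch: clear error counts on every device with no rewind
 * requested. The policy nvlist built below is illustrative only and
 * assumes ZPOOL_REWIND_REQUEST is a uint32 key, as zpool_get_rewind_policy()
 * expects.
 *
 *	nvlist_t *policy = NULL;
 *	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST,
 *	    ZPOOL_NO_REWIND) == 0);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	nvlist_free(policy);
 */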
2941 * Similar to zpool_clear(), but takes a GUID (used by fmd).
2944 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2946 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2948 libzfs_handle_t *hdl = zhp->zpool_hdl;
2950 (void) snprintf(msg, sizeof (msg),
2951 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2952 (u_longlong_t)guid);
2954 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2956 zc.zc_cookie = ZPOOL_NO_REWIND;
2958 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2961 return (zpool_standard_error(hdl, errno, msg));
2965 * Change the GUID for a pool.
2968 zpool_reguid(zpool_handle_t *zhp)
2971 libzfs_handle_t *hdl = zhp->zpool_hdl;
2972 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
2974 (void) snprintf(msg, sizeof (msg),
2975 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
2977 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2978 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
2981 return (zpool_standard_error(hdl, errno, msg));
2985 * Convert from a devid string to a path.
2988 devid_to_path(char *devid_str)
2993 devid_nmlist_t *list = NULL;
2996 if (devid_str_decode(devid_str, &devid, &minor) != 0)
2999 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3001 devid_str_free(minor);
3007 if ((path = strdup(list[0].devname)) == NULL)
3010 devid_free_nmlist(list);
3016 * Convert from a path to a devid string.
3019 path_to_devid(const char *path)
3025 if ((fd = open(path, O_RDONLY)) < 0)
3030 if (devid_get(fd, &devid) == 0) {
3031 if (devid_get_minor_name(fd, &minor) == 0)
3032 ret = devid_str_encode(devid, minor);
3034 devid_str_free(minor);
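/*
 * Illustrative round trip (hypothetical path): when a device has not
 * moved, path_to_devid() and devid_to_path() invert each other.
 *
 *	char *id = path_to_devid("/dev/sda");
 *	char *path = (id != NULL) ? devid_to_path(id) : NULL;
 */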
3043 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3044 * ignore any failure here, since a common case is for an unprivileged user to
3045 * type 'zpool status', and we'll display the correct information anyway.
3048 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3050 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3052 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3053 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3054 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3057 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3061 * Remove partition suffix from a vdev path. Partition suffixes may take three
3062 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3063 case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3064 * third case only occurs when preceded by a string matching the regular
3065 * expression "^[hs]d[a-z]+", i.e. a scsi or ide disk.
3068 strip_partition(libzfs_handle_t *hdl, char *path)
3070 char *tmp = zfs_strdup(hdl, path);
3071 char *part = NULL, *d = NULL;
3073 if ((part = strstr(tmp, "-part")) && part != tmp) {
3075 } else if ((part = strrchr(tmp, 'p')) &&
3076 part > tmp + 1 && isdigit(*(part-1))) {
3078 } else if ((tmp[0] == 'h' || tmp[0] == 's') && tmp[1] == 'd') {
3079 for (d = &tmp[2]; isalpha(*d); part = ++d);
3081 if (part && d && *d != '\0') {
3082 for (; isdigit(*d); d++);
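/*
 * Illustrative inputs and outputs for the three suffix forms handled
 * above (device names hypothetical):
 *
 *	"/dev/disk/by-id/ata-MODEL-part3"	->  ".../ata-MODEL"
 *	"md0p1"					->  "md0"
 *	"sda3"					->  "sda"
 */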
3089 #define PATH_BUF_LEN 64
3092 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3093 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3094 * We also check if this is a whole disk, in which case we strip off the
3095 * trailing 's0' slice name.
3097 * This routine is also responsible for identifying when disks have been
3098 * reconfigured in a new location. The kernel will have opened the device by
3099 * devid, but the path will still refer to the old location. To catch this, we
3100 * first do a path -> devid translation (which is fast for the common case). If
3101 * the devid matches, we're done. If not, we do a reverse devid -> path
3102 * translation and issue the appropriate ioctl() to update the path of the vdev.
3103 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any of these checks.
3107 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3110 char *path, *devid, *type;
3112 char buf[PATH_BUF_LEN];
3116 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3118 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3120 (void) snprintf(buf, sizeof (buf), "%llu",
3121 (u_longlong_t)value);
3123 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3125 * If the device is dead (faulted, offline, etc) then don't
3126 * bother opening it. Otherwise we may be forcing the user to
3127 * open a misbehaving device, which can have undesirable side effects.
3130 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3131 (uint64_t **)&vs, &vsc) != 0 ||
3132 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3134 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3136 * Determine if the current path is correct.
3138 char *newdevid = path_to_devid(path);
3140 if (newdevid == NULL ||
3141 strcmp(devid, newdevid) != 0) {
3144 if ((newpath = devid_to_path(devid)) != NULL) {
3146 * Update the path appropriately.
3148 set_path(zhp, nv, newpath);
3149 if (nvlist_add_string(nv,
3150 ZPOOL_CONFIG_PATH, newpath) == 0)
3151 verify(nvlist_lookup_string(nv,
3159 devid_str_free(newdevid);
3163 * For a block device only use the name.
3165 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3166 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
3167 path = strrchr(path, '/');
3172 * Remove the partition from the path if this is a whole disk.
3174 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3175 &value) == 0 && value) {
3176 return (strip_partition(hdl, path));
3179 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3182 * If it's a raidz device, we need to stick in the parity level.
3184 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3185 char tmpbuf[PATH_BUF_LEN];
3187 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3189 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%llu", path,
3190 (u_longlong_t)value);
3195 * We identify each top-level vdev by using a <type-id>
3196 * naming convention.
3201 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3203 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3209 return (zfs_strdup(hdl, path));
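/*
 * Illustrative outputs: a whole-disk vdev with path "/dev/sda1" is named
 * "sda"; a raidz2 top-level vdev with id 0 becomes "raidz2-0"; a device
 * that is not present is named by its guid.
 */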
3213 zbookmark_compare(const void *a, const void *b)
3215 return (memcmp(a, b, sizeof (zbookmark_t)));
3219 * Retrieve the persistent error log, uniquify the members, and return to the
3223 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3225 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3227 zbookmark_t *zb = NULL;
3231 * Retrieve the raw error list from the kernel. If the number of errors
3232 * has increased, allocate more space and continue until we get the entire list.
3235 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3239 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3240 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3242 zc.zc_nvlist_dst_size = count;
3243 (void) strcpy(zc.zc_name, zhp->zpool_name);
3245 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3247 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3248 if (errno == ENOMEM) {
3249 count = zc.zc_nvlist_dst_size;
3250 if ((zc.zc_nvlist_dst = (uintptr_t)
3251 zfs_alloc(zhp->zpool_hdl, count *
3252 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3263 * Sort the resulting bookmarks. This is a little confusing due to the
3264 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3265 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3266 * _not_ copied as part of the process. So we point the start of our
3267 * array appropriately and decrement the total number of elements.
3269 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3270 zc.zc_nvlist_dst_size;
3271 count -= zc.zc_nvlist_dst_size;
3273 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3275 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3278 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3280 for (i = 0; i < count; i++) {
3283 /* ignoring zb_blkid and zb_level for now */
3284 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3285 zb[i-1].zb_object == zb[i].zb_object)
3288 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3290 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3291 zb[i].zb_objset) != 0) {
3295 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3296 zb[i].zb_object) != 0) {
/* the pair name "ejk" is arbitrary; consumers only iterate the pairs */
3300 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3307 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3311 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3312 return (no_memory(zhp->zpool_hdl));
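/*
 * Usage sketch: walk the uniquified error list; each entry is an nvlist
 * holding ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT. process() below is a
 * hypothetical caller routine.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL)
 *			process(elem);
 *		nvlist_free(nverrlist);
 *	}
 */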
3316 * Upgrade a ZFS pool to the latest on-disk version.
3319 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3321 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3322 libzfs_handle_t *hdl = zhp->zpool_hdl;
3324 (void) strcpy(zc.zc_name, zhp->zpool_name);
3325 zc.zc_cookie = new_version;
3327 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3328 return (zpool_standard_error_fmt(hdl, errno,
3329 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3335 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3340 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3341 for (i = 1; i < argc; i++) {
3342 if (strlen(history_str) + 1 + strlen(argv[i]) >
3345 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3346 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3351 * Stage command history for logging.
3354 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3356 if (history_str == NULL)
3359 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3362 if (hdl->libzfs_log_str != NULL)
3363 free(hdl->libzfs_log_str);
3365 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3366 return (no_memory(hdl));
3372 * Perform ioctl to get some command history of a pool.
3374 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3375 * logical offset of the history buffer to start reading from.
3377 * Upon return, 'off' is the next logical offset to read from and
3378 * 'len' is the actual amount of bytes read into 'buf'.
3381 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3383 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3384 libzfs_handle_t *hdl = zhp->zpool_hdl;
3386 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3388 zc.zc_history = (uint64_t)(uintptr_t)buf;
3389 zc.zc_history_len = *len;
3390 zc.zc_history_offset = *off;
3392 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3395 return (zfs_error_fmt(hdl, EZFS_PERM,
3396 dgettext(TEXT_DOMAIN,
3397 "cannot show history for pool '%s'"),
3400 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3401 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3402 "'%s'"), zhp->zpool_name));
3404 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3405 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3406 "'%s', pool must be upgraded"), zhp->zpool_name));
3408 return (zpool_standard_error_fmt(hdl, errno,
3409 dgettext(TEXT_DOMAIN,
3410 "cannot get history for '%s'"), zhp->zpool_name));
3414 *len = zc.zc_history_len;
3415 *off = zc.zc_history_offset;
3421 * Process the buffer of nvlists, unpacking and storing each nvlist record
3422 * into 'records'. 'leftover' is set to the number of bytes that weren't
3423 * processed as there wasn't a complete record.
3426 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3427 nvlist_t ***records, uint_t *numrecords)
3433 while (bytes_read > sizeof (reclen)) {
3435 /* get length of packed record (stored as little endian) */
3436 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3437 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3439 if (bytes_read < sizeof (reclen) + reclen)
3443 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3445 bytes_read -= sizeof (reclen) + reclen;
3446 buf += sizeof (reclen) + reclen;
3448 /* add record to nvlist array */
3450 if (ISP2(*numrecords + 1)) {
3451 *records = realloc(*records,
3452 *numrecords * 2 * sizeof (nvlist_t *));
3454 (*records)[*numrecords - 1] = nv;
3457 *leftover = bytes_read;
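/*
 * Worked example of the little-endian length decode above: if the first
 * eight bytes of 'buf' are 2c 01 00 00 00 00 00 00, then reclen is
 * 0x12c (300) and the packed nvlist record occupies the next 300 bytes.
 */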
3461 #define HIS_BUF_LEN (128*1024)
3464 * Retrieve the command history of a pool.
3467 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3469 char buf[HIS_BUF_LEN];
3471 nvlist_t **records = NULL;
3472 uint_t numrecords = 0;
3476 uint64_t bytes_read = sizeof (buf);
3479 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3482 /* if nothing else was read in, we're at EOF, just return */
3486 if ((err = zpool_history_unpack(buf, bytes_read,
3487 &leftover, &records, &numrecords)) != 0)
3495 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3496 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3497 records, numrecords) == 0);
3499 for (i = 0; i < numrecords; i++)
3500 nvlist_free(records[i]);
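/*
 * Usage sketch: count the history records of a pool.
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **recs;
 *	uint_t nrecs;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &recs, &nrecs) == 0);
 *		(void) printf("%u records\n", nrecs);
 *		nvlist_free(nvhis);
 *	}
 */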
3507 * Retrieve the next event. If there is a new event available 'nvp' will
3508 * contain a newly allocated nvlist and 'dropped' will be set to the number
3509 * of missed events since the last call to this function. When 'nvp' is
3510 * set to NULL it indicates no new events are available. In either case
3511 * the function returns 0 and it is up to the caller to free 'nvp'. In
3512 * the case of a fatal error the function will return a non-zero value.
3513 * When the function is called in blocking mode it will not return until
3514 * a new event is available.
3517 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
3518 int *dropped, int block, int cleanup_fd)
3520 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3525 zc.zc_cleanup_fd = cleanup_fd;
3528 zc.zc_guid = ZEVENT_NONBLOCK;
3530 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
3534 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
3537 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
3538 dgettext(TEXT_DOMAIN, "zfs shutdown"));
3541 /* Blocking error case should not occur */
3543 error = zpool_standard_error_fmt(hdl, errno,
3544 dgettext(TEXT_DOMAIN, "cannot get event"));
3548 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3549 error = zfs_error_fmt(hdl, EZFS_NOMEM,
3550 dgettext(TEXT_DOMAIN, "cannot get event"));
3556 error = zpool_standard_error_fmt(hdl, errno,
3557 dgettext(TEXT_DOMAIN, "cannot get event"));
3562 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
3566 *dropped = (int)zc.zc_cookie;
3568 zcmd_free_nvlists(&zc);
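/*
 * Usage sketch (blocking consumer): drain events as they arrive.
 * 'cleanup_fd' is assumed to be an open ZFS_DEV descriptor owned by the
 * caller; the loop is illustrative only.
 *
 *	nvlist_t *nvl;
 *	int dropped;
 *
 *	while (zpool_events_next(hdl, &nvl, &dropped, 1, cleanup_fd) == 0) {
 *		if (nvl == NULL)
 *			continue;
 *		nvlist_free(nvl);
 *	}
 */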
3577 zpool_events_clear(libzfs_handle_t *hdl, int *count)
3579 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3582 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3583 "cannot clear events"));
3585 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
3586 return (zpool_standard_error_fmt(hdl, errno, msg));
3589 *count = (int)zc.zc_cookie; /* # of events cleared */
3595 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3596 char *pathname, size_t len)
3598 zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
3599 boolean_t mounted = B_FALSE;
3600 char *mntpnt = NULL;
3601 char dsname[MAXNAMELEN];
3604 /* special case for the MOS */
3605 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", (longlong_t)obj);
3609 /* get the dataset's name */
3610 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3612 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3613 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3614 /* just write out a path of two object numbers */
3615 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3616 (longlong_t)dsobj, (longlong_t)obj);
3619 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3621 /* find out if the dataset is mounted */
3622 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3624 /* get the corrupted object's path */
3625 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3627 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3630 (void) snprintf(pathname, len, "%s%s", mntpnt,
3633 (void) snprintf(pathname, len, "%s:%s",
3634 dsname, zc.zc_value);
3637 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (longlong_t)obj);
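/*
 * Illustrative results: a mounted dataset yields a full path such as
 * "/tank/home/file"; an unmounted one yields "tank/home:/file"; and when
 * the dataset name can't be resolved the caller gets "<0xNN>:<0xNN>".
 */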
3643 * Read the EFI label from the config; if a label does not exist, then
3644 * pass back the error to the caller. If the caller has passed a non-NULL
3645 * diskaddr argument then we set it to the starting address of the EFI partition.
3649 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3653 char diskname[MAXPATHLEN];
3656 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3659 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3660 strrchr(path, '/'));
3661 if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
3662 struct dk_gpt *vtoc;
3664 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3666 *sb = vtoc->efi_parts[0].p_start;
3675 * Determine where a partition starts on a disk in the current configuration.
3679 find_start_block(nvlist_t *config)
3683 diskaddr_t sb = MAXOFFSET_T;
3686 if (nvlist_lookup_nvlist_array(config,
3687 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3688 if (nvlist_lookup_uint64(config,
3689 ZPOOL_CONFIG_WHOLE_DISK,
3690 &wholedisk) != 0 || !wholedisk) {
3691 return (MAXOFFSET_T);
3693 if (read_efi_label(config, &sb) < 0)
3698 for (c = 0; c < children; c++) {
3699 sb = find_start_block(child[c]);
3700 if (sb != MAXOFFSET_T) {
3704 return (MAXOFFSET_T);
3708 zpool_label_disk_wait(char *path, int timeout)
3710 struct stat64 statbuf;
3714 * Wait timeout milliseconds for a newly created device to be available
3715 * from the given path. There is a small window when a /dev/ device
3716 * will exist and the udev link will not, so we must wait for the
3717 * symlink. Depending on the udev rules this may take a few seconds.
3719 for (i = 0; i < timeout; i++) {
usleep(1000);	/* poll once per millisecond */
errno = 0;	/* stat64() only sets errno on failure, so clear it first */
3723 if ((stat64(path, &statbuf) == 0) && (errno == 0))
3731 zpool_label_disk_check(char *path)
3733 struct dk_gpt *vtoc;
3736 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
3739 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
3744 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
3756 * Label an individual disk. The name provided is the short name,
3757 * stripped of any leading /dev path.
3760 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3762 char path[MAXPATHLEN];
3763 struct dk_gpt *vtoc;
3765 size_t resv = EFI_MIN_RESV_SIZE;
3766 uint64_t slice_size;
3767 diskaddr_t start_block;
3770 /* prepare an error message just in case */
3771 (void) snprintf(errbuf, sizeof (errbuf),
3772 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3777 if (pool_is_bootable(zhp)) {
3778 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3779 "EFI labeled devices are not supported on root "
3781 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3784 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3785 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3787 if (zhp->zpool_start_block == 0)
3788 start_block = find_start_block(nvroot);
3790 start_block = zhp->zpool_start_block;
3791 zhp->zpool_start_block = start_block;
3794 start_block = NEW_START_BLOCK;
3797 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3800 if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
3802 * This shouldn't happen. We've long since verified that this
3803 * is a valid device.
3805 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3806 "label '%s': unable to open device: %d"), path, errno);
3807 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3810 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3812 * The only way this can fail is if we run out of memory, or we
3813 * were unable to read the disk's capacity
3815 if (errno == ENOMEM)
3816 (void) no_memory(hdl);
3819 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
3820 "label '%s': unable to read disk capacity"), path);
3822 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3825 slice_size = vtoc->efi_last_u_lba + 1;
3826 slice_size -= EFI_MIN_RESV_SIZE;
3827 if (start_block == MAXOFFSET_T)
3828 start_block = NEW_START_BLOCK;
3829 slice_size -= start_block;
3830 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
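/*
 * Worked example of the sizing above (illustrative numbers): if the last
 * usable LBA is 1000215215, slice_size starts at 1000215216 sectors,
 * loses EFI_MIN_RESV_SIZE sectors for the trailing reserved partition and
 * start_block sectors for the leading gap, and is finally rounded down to
 * a multiple of PARTITION_END_ALIGNMENT so slice 0 ends aligned.
 */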
3832 vtoc->efi_parts[0].p_start = start_block;
3833 vtoc->efi_parts[0].p_size = slice_size;
3836 * Why we use V_USR: V_BACKUP confuses users, and is considered
3837 * disposable by some EFI utilities (since EFI doesn't have a backup
3838 * slice). V_UNASSIGNED is supposed to be used only for zero size
3839 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3840 * etc. were all pretty specific. V_USR is as close to reality as we
3841 * can get, in the absence of V_OTHER.
3843 vtoc->efi_parts[0].p_tag = V_USR;
3844 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3846 vtoc->efi_parts[8].p_start = slice_size + start_block;
3847 vtoc->efi_parts[8].p_size = resv;
3848 vtoc->efi_parts[8].p_tag = V_RESERVED;
3850 if ((rval = efi_write(fd, vtoc)) != 0) {
3852 * Some block drivers (like pcata) may not support EFI
3853 * GPT labels. Print out a helpful error message directing
3854 * the user to manually label the disk and give a specific slice.
3860 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
3861 "parted(8) and then provide a specific slice: %d"), rval);
3862 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3868 /* Wait for the first expected slice to appear. */
3869 (void) snprintf(path, sizeof (path), "%s/%s%s%s", DISK_ROOT, name,
3870 isdigit(name[strlen(name)-1]) ? "p" : "", FIRST_SLICE);
3871 rval = zpool_label_disk_wait(path, 3000);
3873 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
3874 "detect device partitions on '%s': %d"), path, rval);
3875 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3878 /* We can't be too paranoid. Read the label back and verify it. */
3879 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
3880 rval = zpool_label_disk_check(path);
3882 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
3883 "EFI label on '%s' is damaged. Ensure\nthis device "
3884 "is not in in use, and is functioning properly: %d"),
3886 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));