4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
40 #include <sys/efi_partition.h>
42 #include <sys/zfs_ioctl.h>
46 #include "zfs_namecheck.h"
48 #include "libzfs_impl.h"
50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
52 #if defined(__i386) || defined(__amd64)
53 #define BOOTCMD "installgrub(1M)"
55 #define BOOTCMD "installboot(1M)"
59 * ====================================================================
60 * zpool property functions
61 * ====================================================================
65 zpool_get_all_props(zpool_handle_t *zhp)
68 libzfs_handle_t *hdl = zhp->zpool_hdl;
70 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
72 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
75 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
76 if (errno == ENOMEM) {
77 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
78 zcmd_free_nvlists(&zc);
82 zcmd_free_nvlists(&zc);
87 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
88 zcmd_free_nvlists(&zc);
92 zcmd_free_nvlists(&zc);
98 zpool_props_refresh(zpool_handle_t *zhp)
102 old_props = zhp->zpool_props;
104 if (zpool_get_all_props(zhp) != 0)
107 nvlist_free(old_props);
112 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
118 zprop_source_t source;
120 nvl = zhp->zpool_props;
121 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
122 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 source = ZPROP_SRC_DEFAULT;
127 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
138 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
142 zprop_source_t source;
144 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
146 * zpool_get_all_props() has most likely failed because
147 * the pool is faulted, but if all we need is the top level
148 * vdev's guid then get it from the zhp config nvlist.
150 if ((prop == ZPOOL_PROP_GUID) &&
151 (nvlist_lookup_nvlist(zhp->zpool_config,
152 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
153 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
157 return (zpool_prop_default_numeric(prop));
160 nvl = zhp->zpool_props;
161 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
162 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 source = ZPROP_SRC_DEFAULT;
167 value = zpool_prop_default_numeric(prop);
177 * Map VDEV STATE to printed strings.
180 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
183 case VDEV_STATE_CLOSED:
184 case VDEV_STATE_OFFLINE:
185 return (gettext("OFFLINE"));
186 case VDEV_STATE_REMOVED:
187 return (gettext("REMOVED"));
188 case VDEV_STATE_CANT_OPEN:
189 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
190 return (gettext("FAULTED"));
192 return (gettext("UNAVAIL"));
193 case VDEV_STATE_FAULTED:
194 return (gettext("FAULTED"));
195 case VDEV_STATE_DEGRADED:
196 return (gettext("DEGRADED"));
197 case VDEV_STATE_HEALTHY:
198 return (gettext("ONLINE"));
201 return (gettext("UNKNOWN"));
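/*
 * Illustrative sketch (not part of the original file): rendering a vdev
 * state for display with zpool_state_to_name(). VDEV_AUX_NONE is the
 * "no additional information" auxiliary state from sys/fs/zfs.h.
 */
static void
example_print_vdev_state(void)
{
        (void) printf("state: %s\n",
            zpool_state_to_name(VDEV_STATE_HEALTHY, VDEV_AUX_NONE));
}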
205 * Get a zpool property value for 'prop' and return the value in
206 * a pre-allocated buffer.
209 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
210 zprop_source_t *srctype)
214 zprop_source_t src = ZPROP_SRC_NONE;
219 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
221 case ZPOOL_PROP_NAME:
222 (void) strlcpy(buf, zpool_get_name(zhp), len);
225 case ZPOOL_PROP_HEALTH:
226 (void) strlcpy(buf, "FAULTED", len);
229 case ZPOOL_PROP_GUID:
230 intval = zpool_get_prop_int(zhp, prop, &src);
231 (void) snprintf(buf, len, "%llu", intval);
234 case ZPOOL_PROP_ALTROOT:
235 case ZPOOL_PROP_CACHEFILE:
236 if (zhp->zpool_props != NULL ||
237 zpool_get_all_props(zhp) == 0) {
239 zpool_get_prop_string(zhp, prop, &src),
247 (void) strlcpy(buf, "-", len);
256 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
257 prop != ZPOOL_PROP_NAME)
260 switch (zpool_prop_get_type(prop)) {
261 case PROP_TYPE_STRING:
262 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
266 case PROP_TYPE_NUMBER:
267 intval = zpool_get_prop_int(zhp, prop, &src);
270 case ZPOOL_PROP_SIZE:
271 case ZPOOL_PROP_USED:
272 case ZPOOL_PROP_AVAILABLE:
273 (void) zfs_nicenum(intval, buf, len);
276 case ZPOOL_PROP_CAPACITY:
277 (void) snprintf(buf, len, "%llu%%",
278 (u_longlong_t)intval);
281 case ZPOOL_PROP_HEALTH:
282 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
283 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
284 verify(nvlist_lookup_uint64_array(nvroot,
285 ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
287 (void) strlcpy(buf, zpool_state_to_name(intval,
291 (void) snprintf(buf, len, "%llu", intval);
295 case PROP_TYPE_INDEX:
296 intval = zpool_get_prop_int(zhp, prop, &src);
297 if (zpool_prop_index_to_string(prop, intval, &strval)
300 (void) strlcpy(buf, strval, len);
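/*
 * Illustrative sketch (not part of the original file): fetching the
 * 'health' property as a string via zpool_get_prop(). The buffer size
 * follows the ZFS_MAXPROPLEN convention used elsewhere in this file.
 */
static int
example_print_health(zpool_handle_t *zhp)
{
        char buf[ZFS_MAXPROPLEN];
        zprop_source_t src;

        if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
            &src) != 0)
                return (-1);
        (void) printf("%s: %s\n", zpool_get_name(zhp), buf);
        return (0);
}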
314 * Check that the bootfs name names a dataset within the pool it is
315 * being set on. Assumes bootfs is a valid dataset name.
318 bootfs_name_valid(const char *pool, char *bootfs)
320 int len = strlen(pool);
322 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
325 if (strncmp(pool, bootfs, len) == 0 &&
326 (bootfs[len] == '/' || bootfs[len] == '\0'))
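/*
 * Illustrative sketch (not part of the original file); the pool and
 * dataset names below are hypothetical. A bootfs value must name a
 * dataset inside the pool the property is being set on.
 */
static void
example_bootfs_rules(void)
{
        char in_pool[] = "tank/ROOT/be";
        char other_pool[] = "rpool/ROOT/be";

        (void) bootfs_name_valid("tank", in_pool);      /* B_TRUE */
        (void) bootfs_name_valid("tank", other_pool);   /* B_FALSE */
}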
333 * Inspect the configuration to determine if any of the devices contain
337 pool_uses_efi(nvlist_t *config)
342 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
343 &child, &children) != 0)
344 return (read_efi_label(config, NULL) >= 0);
346 for (c = 0; c < children; c++) {
347 if (pool_uses_efi(child[c]))
354 pool_is_bootable(zpool_handle_t *zhp)
356 char bootfs[ZPOOL_MAXNAMELEN];
358 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
359 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
360 sizeof (bootfs)) != 0);
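/*
 * Illustrative sketch (not part of the original file): the guard this
 * file applies before attaching devices -- a bootable (root) pool must
 * not be given EFI-labeled vdevs.
 */
static boolean_t
example_vdev_allowed(zpool_handle_t *zhp, nvlist_t *nvroot)
{
        return (!(pool_is_bootable(zhp) && pool_uses_efi(nvroot)));
}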
365 * Given an nvlist of zpool properties to be set, validate that they are
366 * correct, and parse any numeric properties (index, boolean, etc.) if they
367 * specified as strings.
370 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
371 nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
379 struct stat64 statbuf;
383 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
384 (void) no_memory(hdl);
389 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
390 const char *propname = nvpair_name(elem);
393 * Make sure this property is valid and applies to this type.
395 if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
396 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
397 "invalid property '%s'"), propname);
398 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
402 if (zpool_prop_readonly(prop)) {
403 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
404 "is readonly"), propname);
405 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
409 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
410 &strval, &intval, errbuf) != 0)
414 * Perform additional checking for specific properties.
417 case ZPOOL_PROP_VERSION:
418 if (intval < version || intval > SPA_VERSION) {
419 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
420 "property '%s' number %d is invalid."),
422 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
427 case ZPOOL_PROP_BOOTFS:
428 if (create_or_import) {
429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
430 "property '%s' cannot be set at creation "
431 "or import time"), propname);
432 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
436 if (version < SPA_VERSION_BOOTFS) {
437 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
438 "pool must be upgraded to support "
439 "'%s' property"), propname);
440 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
445 * The bootfs property value has to be a dataset name, and
446 * the dataset has to be in the same pool the property is set on.
448 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
450 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
451 "is an invalid name"), strval);
452 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
456 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
458 "could not open pool '%s'"), poolname);
459 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
462 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
463 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
466 * bootfs property cannot be set on a disk which has
469 if (pool_uses_efi(nvroot)) {
470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
471 "property '%s' not supported on "
472 "EFI labeled devices"), propname);
473 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
480 case ZPOOL_PROP_ALTROOT:
481 if (!create_or_import) {
482 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
483 "property '%s' can only be set during pool "
484 "creation or import"), propname);
485 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
489 if (strval[0] != '/') {
490 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
491 "bad alternate root '%s'"), strval);
492 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
497 case ZPOOL_PROP_CACHEFILE:
498 if (strval[0] == '\0')
501 if (strcmp(strval, "none") == 0)
504 if (strval[0] != '/') {
505 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
506 "property '%s' must be empty, an "
507 "absolute path, or 'none'"), propname);
508 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
512 slash = strrchr(strval, '/');
514 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
515 strcmp(slash, "/..") == 0) {
516 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
517 "'%s' is not a valid file"), strval);
518 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
524 if (strval[0] != '\0' &&
525 (stat64(strval, &statbuf) != 0 ||
526 !S_ISDIR(statbuf.st_mode))) {
527 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
528 "'%s' is not a valid directory"),
530 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
541 nvlist_free(retprops);
546 * Set zpool property: propname=propval.
549 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
551 zfs_cmd_t zc = { 0 };
554 nvlist_t *nvl = NULL;
558 (void) snprintf(errbuf, sizeof (errbuf),
559 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
562 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
563 return (no_memory(zhp->zpool_hdl));
565 if (nvlist_add_string(nvl, propname, propval) != 0) {
567 return (no_memory(zhp->zpool_hdl));
570 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
571 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
572 zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
581 * Execute the corresponding ioctl() to set this property.
583 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
585 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
590 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
592 zcmd_free_nvlists(&zc);
596 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
598 (void) zpool_props_refresh(zhp);
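/*
 * Illustrative sketch (not part of the original file): setting a pool
 * property by name, exactly as zpool(1M) does for 'zpool set'.
 */
static int
example_enable_autoreplace(zpool_handle_t *zhp)
{
        return (zpool_set_prop(zhp, "autoreplace", "on"));
}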
604 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
606 libzfs_handle_t *hdl = zhp->zpool_hdl;
608 char buf[ZFS_MAXPROPLEN];
610 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
613 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
618 if (entry->pl_prop != ZPROP_INVAL &&
619 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
621 if (strlen(buf) > entry->pl_width)
622 entry->pl_width = strlen(buf);
631 * Validate the given pool name, optionally putting an extended error message in
635 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
641 ret = pool_namecheck(pool, &why, &what);
644 * The rules for reserved pool names were extended at a later point.
645 * But we need to support users with existing pools that may now be
646 * invalid. So we only check for this expanded set of names during a
647 * create (or import), and only in userland.
649 if (ret == 0 && !isopen &&
650 (strncmp(pool, "mirror", 6) == 0 ||
651 strncmp(pool, "raidz", 5) == 0 ||
652 strncmp(pool, "spare", 5) == 0 ||
653 strcmp(pool, "log") == 0)) {
656 dgettext(TEXT_DOMAIN, "name is reserved"));
664 case NAME_ERR_TOOLONG:
666 dgettext(TEXT_DOMAIN, "name is too long"));
669 case NAME_ERR_INVALCHAR:
671 dgettext(TEXT_DOMAIN, "invalid character "
672 "'%c' in pool name"), what);
675 case NAME_ERR_NOLETTER:
676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
677 "name must begin with a letter"));
680 case NAME_ERR_RESERVED:
681 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
682 "name is reserved"));
685 case NAME_ERR_DISKLIKE:
686 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
687 "pool name is reserved"));
690 case NAME_ERR_LEADING_SLASH:
691 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
692 "leading slash in name"));
695 case NAME_ERR_EMPTY_COMPONENT:
696 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
697 "empty component in name"));
700 case NAME_ERR_TRAILING_SLASH:
701 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
702 "trailing slash in name"));
705 case NAME_ERR_MULTIPLE_AT:
706 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
707 "multiple '@' delimiters in name"));
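/*
 * Illustrative sketch (not part of the original file); the names are
 * hypothetical. At create time (isopen == B_FALSE) the expanded set of
 * reserved names is enforced, so "mirror0" is rejected while "tank"
 * is accepted.
 */
static void
example_name_rules(libzfs_handle_t *hdl)
{
        (void) zpool_name_valid(hdl, B_FALSE, "tank");     /* B_TRUE */
        (void) zpool_name_valid(hdl, B_FALSE, "mirror0");  /* B_FALSE */
}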
719 * Open a handle to the given pool, even if the pool is currently in the FAULTED
723 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
729 * Make sure the pool name is valid.
731 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
732 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
733 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
738 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
741 zhp->zpool_hdl = hdl;
742 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
744 if (zpool_refresh_stats(zhp, &missing) != 0) {
750 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
751 (void) zfs_error_fmt(hdl, EZFS_NOENT,
752 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
761 * Like the above, but silent on error. Used when iterating over pools (because
762 * the configuration cache may be out of date).
765 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
770 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
773 zhp->zpool_hdl = hdl;
774 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
776 if (zpool_refresh_stats(zhp, &missing) != 0) {
792 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
796 zpool_open(libzfs_handle_t *hdl, const char *pool)
800 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
803 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
804 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
805 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
814 * Close the handle. Simply frees the memory associated with the handle.
817 zpool_close(zpool_handle_t *zhp)
819 if (zhp->zpool_config)
820 nvlist_free(zhp->zpool_config);
821 if (zhp->zpool_old_config)
822 nvlist_free(zhp->zpool_old_config);
823 if (zhp->zpool_props)
824 nvlist_free(zhp->zpool_props);
829 * Return the name of the pool.
832 zpool_get_name(zpool_handle_t *zhp)
834 return (zhp->zpool_name);
839 * Return the state of the pool (ACTIVE or UNAVAILABLE)
842 zpool_get_state(zpool_handle_t *zhp)
844 return (zhp->zpool_state);
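/*
 * Illustrative sketch (not part of the original file): the usual
 * open/inspect/close lifecycle for a pool handle; "tank" is a
 * hypothetical pool name.
 */
static void
example_pool_lifecycle(libzfs_handle_t *hdl)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open(hdl, "tank")) == NULL)
                return;
        (void) printf("%s is %s\n", zpool_get_name(zhp),
            zpool_get_state(zhp) == POOL_STATE_ACTIVE ?
            "active" : "unavailable");
        zpool_close(zhp);
}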
848 * Create the named pool, using the provided vdev list. It is assumed
849 * that the consumer has already validated the contents of the nvlist, so we
850 * don't have to worry about error semantics.
853 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
854 nvlist_t *props, nvlist_t *fsprops)
856 zfs_cmd_t zc = { 0 };
857 nvlist_t *zc_fsprops = NULL;
858 nvlist_t *zc_props = NULL;
863 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
864 "cannot create '%s'"), pool);
866 if (!zpool_name_valid(hdl, B_FALSE, pool))
867 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
869 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
873 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
874 SPA_VERSION_1, B_TRUE, msg)) == NULL) {
883 zoned = ((nvlist_lookup_string(fsprops,
884 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
885 strcmp(zonestr, "on") == 0);
887 if ((zc_fsprops = zfs_valid_proplist(hdl,
888 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
892 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
895 if (nvlist_add_nvlist(zc_props,
896 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
901 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
904 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
906 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
908 zcmd_free_nvlists(&zc);
909 nvlist_free(zc_props);
910 nvlist_free(zc_fsprops);
915 * This can happen if the user has specified the same
916 * device multiple times. We can't reliably detect this
917 * until we try to add it and see we already have a
920 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
921 "one or more vdevs refer to the same device"));
922 return (zfs_error(hdl, EZFS_BADDEV, msg));
926 * This occurs when one of the devices is below
927 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
928 * device was the problem device since there's no
929 * reliable way to determine device size from userland.
934 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
937 "one or more devices is less than the "
938 "minimum size (%s)"), buf);
940 return (zfs_error(hdl, EZFS_BADDEV, msg));
943 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
944 "one or more devices is out of space"));
945 return (zfs_error(hdl, EZFS_BADDEV, msg));
948 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
949 "cache device must be a disk or disk slice"));
950 return (zfs_error(hdl, EZFS_BADDEV, msg));
953 return (zpool_standard_error(hdl, errno, msg));
958 * If this is an alternate root pool, then we automatically set the
959 * mountpoint of the root dataset to be '/'.
961 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
965 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
966 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
973 zcmd_free_nvlists(&zc);
974 nvlist_free(zc_props);
975 nvlist_free(zc_fsprops);
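/*
 * Illustrative sketch (not part of the original file): hand-building a
 * minimal single-disk vdev tree for zpool_create(). In practice the
 * tree comes from make_root_vdev() in zpool(1M), which also fills in
 * details such as the whole_disk flag; the pool name is hypothetical.
 */
static int
example_create_pool(libzfs_handle_t *hdl, const char *disk)
{
        nvlist_t *nvroot, *vdev;
        int err;

        verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_DISK) == 0);
        verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, disk) == 0);

        verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &vdev, 1) == 0);

        err = zpool_create(hdl, "tank", nvroot, NULL, NULL);
        nvlist_free(vdev);
        nvlist_free(nvroot);
        return (err);
}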
980 * Destroy the given pool. It is up to the caller to ensure that there are no
981 * datasets left in the pool.
984 zpool_destroy(zpool_handle_t *zhp)
986 zfs_cmd_t zc = { 0 };
987 zfs_handle_t *zfp = NULL;
988 libzfs_handle_t *hdl = zhp->zpool_hdl;
991 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
992 (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
993 ZFS_TYPE_FILESYSTEM)) == NULL)
996 if (zpool_remove_zvol_links(zhp) != 0)
999 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1001 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1002 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1003 "cannot destroy '%s'"), zhp->zpool_name);
1005 if (errno == EROFS) {
1006 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1007 "one or more devices is read only"));
1008 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1010 (void) zpool_standard_error(hdl, errno, msg);
1019 remove_mountpoint(zfp);
1027 * Add the given vdevs to the pool. The caller must have already performed the
1028 * necessary verification to ensure that the vdev specification is well-formed.
1031 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1033 zfs_cmd_t zc = { 0 };
1035 libzfs_handle_t *hdl = zhp->zpool_hdl;
1037 nvlist_t **spares, **l2cache;
1038 uint_t nspares, nl2cache;
1040 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1041 "cannot add to '%s'"), zhp->zpool_name);
1043 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1044 SPA_VERSION_SPARES &&
1045 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1046 &spares, &nspares) == 0) {
1047 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1048 "upgraded to add hot spares"));
1049 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1052 if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1053 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1056 for (s = 0; s < nspares; s++) {
1059 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1060 &path) == 0 && pool_uses_efi(spares[s])) {
1061 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1062 "device '%s' contains an EFI label and "
1063 "cannot be used on root pools."),
1064 zpool_vdev_name(hdl, NULL, spares[s]));
1065 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1070 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1071 SPA_VERSION_L2CACHE &&
1072 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1073 &l2cache, &nl2cache) == 0) {
1074 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1075 "upgraded to add cache devices"));
1076 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1079 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1081 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1083 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1087 * This can happen if the user has specified the same
1088 * device multiple times. We can't reliably detect this
1089 * until we try to add it and see we already have a
1092 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1093 "one or more vdevs refer to the same device"));
1094 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1099 * This occurs when one of the devices is below
1100 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1101 * device was the problem device since there's no
1102 * reliable way to determine device size from userland.
1107 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1109 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1110 "device is less than the minimum "
1113 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1117 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1118 "pool must be upgraded to add these vdevs"));
1119 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1123 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1124 "root pool cannot have multiple vdevs"
1125 " or separate logs"));
1126 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1130 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1131 "cache device must be a disk or disk slice"));
1132 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1136 (void) zpool_standard_error(hdl, errno, msg);
1144 zcmd_free_nvlists(&zc);
1150 * Exports the pool from the system. The caller must ensure that there are no
1151 * mounted datasets in the pool.
1154 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1156 zfs_cmd_t zc = { 0 };
1159 if (zpool_remove_zvol_links(zhp) != 0)
1162 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1163 "cannot export '%s'"), zhp->zpool_name);
1165 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1166 zc.zc_cookie = force;
1167 zc.zc_guid = hardforce;
1169 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1172 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1173 "use '-f' to override the following errors:\n"
1174 "'%s' has an active shared spare which could be"
1175 " used by other pools once '%s' is exported."),
1176 zhp->zpool_name, zhp->zpool_name);
1177 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1180 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1189 zpool_export(zpool_handle_t *zhp, boolean_t force)
1191 return (zpool_export_common(zhp, force, B_FALSE));
1195 zpool_export_force(zpool_handle_t *zhp)
1197 return (zpool_export_common(zhp, B_TRUE, B_TRUE));
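/*
 * Illustrative sketch (not part of the original file): exporting a pool
 * and re-importing it under an alternate root. 'config' would normally
 * come from zpool_find_import(); "/mnt" is a hypothetical altroot.
 */
static int
example_move_pool(libzfs_handle_t *hdl, zpool_handle_t *zhp,
    nvlist_t *config)
{
        char altroot[] = "/mnt";

        if (zpool_export(zhp, B_FALSE) != 0)
                return (-1);
        return (zpool_import(hdl, config, NULL, altroot));
}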
1201 * zpool_import() is a contracted interface. Should be kept the same
1204 * Applications should use zpool_import_props() to import a pool with
1205 * new property values to be set.
1208 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1211 nvlist_t *props = NULL;
1214 if (altroot != NULL) {
1215 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1216 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1217 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1221 if (nvlist_add_string(props,
1222 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1223 nvlist_add_string(props,
1224 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1226 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1227 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1232 ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1239 * Import the given pool using the known configuration and a list of
1240 * properties to be set. The configuration should have come from
1241 * zpool_find_import(). The 'newname' parameter controls whether the pool
1242 * is imported with a different name.
1245 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1246 nvlist_t *props, boolean_t importfaulted)
1248 zfs_cmd_t zc = { 0 };
1254 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1257 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1258 "cannot import pool '%s'"), origname);
1260 if (newname != NULL) {
1261 if (!zpool_name_valid(hdl, B_FALSE, newname))
1262 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1263 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1265 thename = (char *)newname;
1273 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1276 if ((props = zpool_valid_proplist(hdl, origname,
1277 props, version, B_TRUE, errbuf)) == NULL) {
1279 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1285 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1287 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1290 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1295 zc.zc_cookie = (uint64_t)importfaulted;
1297 if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1299 if (newname == NULL)
1300 (void) snprintf(desc, sizeof (desc),
1301 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1304 (void) snprintf(desc, sizeof (desc),
1305 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1311 * Unsupported version.
1313 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1317 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1321 (void) zpool_standard_error(hdl, errno, desc);
1326 zpool_handle_t *zhp;
1329 * This should never fail, but play it safe anyway.
1331 if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1333 } else if (zhp != NULL) {
1334 ret = zpool_create_zvol_links(zhp);
1340 zcmd_free_nvlists(&zc);
1350 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1352 zfs_cmd_t zc = { 0 };
1354 libzfs_handle_t *hdl = zhp->zpool_hdl;
1356 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1357 zc.zc_cookie = type;
1359 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1362 (void) snprintf(msg, sizeof (msg),
1363 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1366 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1368 return (zpool_standard_error(hdl, errno, msg));
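/*
 * Illustrative sketch (not part of the original file): starting a full
 * scrub; passing POOL_SCRUB_NONE instead would stop one in progress.
 */
static int
example_start_scrub(zpool_handle_t *zhp)
{
        return (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING));
}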
1372 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1373 * spare, and FALSE if it's an INUSE spare.
1376 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1377 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1381 uint64_t theguid, present;
1383 uint64_t wholedisk = 0;
1387 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1389 if (search == NULL &&
1390 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1392 * If the device has never been present since import, the only
1393 * reliable way to match the vdev is by GUID.
1395 if (theguid == guid)
1397 } else if (search != NULL &&
1398 nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1399 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1403 * For whole disks, the internal path has 's0', but the
1404 * path passed in by the user doesn't.
1406 if (strlen(search) == strlen(path) - 2 &&
1407 strncmp(search, path, strlen(search)) == 0)
1409 } else if (strcmp(search, path) == 0) {
1414 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1415 &child, &children) != 0)
1418 for (c = 0; c < children; c++) {
1419 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1420 avail_spare, l2cache, NULL)) != NULL) {
1422 * The 'is_log' value is only set for the toplevel
1423 * vdev, not the leaf vdevs. So we always look up the
1424 * log device from the root of the vdev tree (where
1425 * 'log' is non-NULL).
1428 nvlist_lookup_uint64(child[c],
1429 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1437 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1438 &child, &children) == 0) {
1439 for (c = 0; c < children; c++) {
1440 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1441 avail_spare, l2cache, NULL)) != NULL) {
1442 *avail_spare = B_TRUE;
1448 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1449 &child, &children) == 0) {
1450 for (c = 0; c < children; c++) {
1451 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1452 avail_spare, l2cache, NULL)) != NULL) {
1463 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1464 boolean_t *l2cache, boolean_t *log)
1466 char buf[MAXPATHLEN];
1472 guid = strtoull(path, &end, 10);
1473 if (guid != 0 && *end == '\0') {
1475 } else if (path[0] != '/') {
1476 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1482 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1485 *avail_spare = B_FALSE;
1489 return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1494 vdev_online(nvlist_t *nv)
1498 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1499 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1500 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1507 * Get phys_path for a root pool.
1508 * Return 0 on success; non-zero on failure.
1511 zpool_get_physpath(zpool_handle_t *zhp, char *physpath)
1513 nvlist_t *vdev_root;
1519 * Make sure this is a root pool, as phys_path doesn't mean
1520 * anything to a non-root pool.
1522 if (!pool_is_bootable(zhp))
1525 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1526 ZPOOL_CONFIG_VDEV_TREE, &vdev_root) == 0);
1528 if (nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1529 &child, &count) != 0)
1532 for (i = 0; i < count; i++) {
1539 if (nvlist_lookup_string(child[i], ZPOOL_CONFIG_TYPE, &type)
1543 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1544 if (!vdev_online(child[i]))
1546 verify(nvlist_lookup_string(child[i],
1547 ZPOOL_CONFIG_PHYS_PATH, &tmppath) == 0);
1548 (void) strncpy(physpath, tmppath, strlen(tmppath));
1549 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0) {
1550 if (nvlist_lookup_nvlist_array(child[i],
1551 ZPOOL_CONFIG_CHILDREN, &child2, &count2) != 0)
1554 for (j = 0; j < count2; j++) {
1555 if (!vdev_online(child2[j]))
1557 if (nvlist_lookup_string(child2[j],
1558 ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0)
1561 if ((strlen(physpath) + strlen(tmppath)) >
1565 if (strlen(physpath) == 0) {
1566 (void) strncpy(physpath, tmppath,
1569 (void) strcat(physpath, " ");
1570 (void) strcat(physpath, tmppath);
1582 * Returns TRUE if the given guid corresponds to the given type.
1583 * This is used to check for hot spares (INUSE or not), and level 2 cache
1587 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1589 uint64_t target_guid;
1595 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1597 if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1598 for (i = 0; i < count; i++) {
1599 verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1600 &target_guid) == 0);
1601 if (guid == target_guid)
1610 * Bring the specified vdev online. The 'flags' parameter is a set of the
1611 * ZFS_ONLINE_* flags.
1614 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1615 vdev_state_t *newstate)
1617 zfs_cmd_t zc = { 0 };
1620 boolean_t avail_spare, l2cache;
1621 libzfs_handle_t *hdl = zhp->zpool_hdl;
1623 (void) snprintf(msg, sizeof (msg),
1624 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1626 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1627 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1629 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1631 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1634 is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1635 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1637 zc.zc_cookie = VDEV_STATE_ONLINE;
1640 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1641 return (zpool_standard_error(hdl, errno, msg));
1643 *newstate = zc.zc_cookie;
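/*
 * Illustrative sketch (not part of the original file): onlining a
 * device with no special flags and reporting the resulting state.
 */
static int
example_online_disk(zpool_handle_t *zhp, const char *path)
{
        vdev_state_t newstate;

        if (zpool_vdev_online(zhp, path, 0, &newstate) != 0)
                return (-1);
        (void) printf("%s is now %s\n", path,
            zpool_state_to_name(newstate, VDEV_AUX_NONE));
        return (0);
}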
1648 * Take the specified vdev offline
1651 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1653 zfs_cmd_t zc = { 0 };
1656 boolean_t avail_spare, l2cache;
1657 libzfs_handle_t *hdl = zhp->zpool_hdl;
1659 (void) snprintf(msg, sizeof (msg),
1660 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1662 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1663 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1665 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1667 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1670 is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1671 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1673 zc.zc_cookie = VDEV_STATE_OFFLINE;
1674 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1676 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1683 * There are no other replicas of this device.
1685 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1688 return (zpool_standard_error(hdl, errno, msg));
1693 * Mark the given vdev faulted.
1696 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1698 zfs_cmd_t zc = { 0 };
1700 libzfs_handle_t *hdl = zhp->zpool_hdl;
1702 (void) snprintf(msg, sizeof (msg),
1703 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1705 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1707 zc.zc_cookie = VDEV_STATE_FAULTED;
1709 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1716 * There are no other replicas of this device.
1718 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1721 return (zpool_standard_error(hdl, errno, msg));
1727 * Mark the given vdev degraded.
1730 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1732 zfs_cmd_t zc = { 0 };
1734 libzfs_handle_t *hdl = zhp->zpool_hdl;
1736 (void) snprintf(msg, sizeof (msg),
1737 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1739 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1741 zc.zc_cookie = VDEV_STATE_DEGRADED;
1743 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1746 return (zpool_standard_error(hdl, errno, msg));
1750 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1754 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1760 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1762 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1765 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1766 children == 2 && child[which] == tgt)
1769 for (c = 0; c < children; c++)
1770 if (is_replacing_spare(child[c], tgt, which))
1778 * Attach new_disk (fully described by nvroot) to old_disk.
1779 * If 'replacing' is specified, the new disk will replace the old one.
1782 zpool_vdev_attach(zpool_handle_t *zhp,
1783 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1785 zfs_cmd_t zc = { 0 };
1789 boolean_t avail_spare, l2cache, islog;
1791 char *path, *newname;
1794 nvlist_t *config_root;
1795 libzfs_handle_t *hdl = zhp->zpool_hdl;
1796 boolean_t rootpool = pool_is_bootable(zhp);
1799 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1800 "cannot replace %s with %s"), old_disk, new_disk);
1802 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1803 "cannot attach %s to %s"), new_disk, old_disk);
1806 * If this is a root pool, make sure that we're not attaching an
1807 * EFI labeled device.
1809 if (rootpool && pool_uses_efi(nvroot)) {
1810 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1811 "EFI labeled devices are not supported on root pools."));
1812 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1815 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1816 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
1818 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1821 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1824 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1826 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1827 zc.zc_cookie = replacing;
1829 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1830 &child, &children) != 0 || children != 1) {
1831 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1832 "new device must be a single disk"));
1833 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1836 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1837 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1839 if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1843 * If the target is a hot spare that has been swapped in, we can only
1844 * replace it with another hot spare.
1847 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1848 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
1849 NULL) == NULL || !avail_spare) &&
1850 is_replacing_spare(config_root, tgt, 1)) {
1851 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1852 "can only be replaced by another hot spare"));
1854 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1858 * If we are attempting to replace a spare, it cannot be applied to an
1859 * already spared device.
1862 nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1863 zpool_find_vdev(zhp, newname, &avail_spare,
1864 &l2cache, NULL) != NULL && avail_spare &&
1865 is_replacing_spare(config_root, tgt, 0)) {
1866 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1867 "device has already been replaced with a spare"));
1869 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1874 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1877 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1879 zcmd_free_nvlists(&zc);
1884 * XXX - This should be removed once we can
1885 * automatically install the bootblocks on the
1886 * newly attached disk.
1888 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
1889 "be sure to invoke %s to make '%s' bootable.\n"),
1898 * Can't attach to or replace this type of vdev.
1902 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1903 "cannot replace a log with a spare"));
1905 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1906 "cannot replace a replacing device"));
1908 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1909 "can only attach to mirrors and top-level "
1912 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
1917 * The new device must be a single disk.
1919 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1920 "new device must be a single disk"));
1921 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1925 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1927 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1932 * The new device is too small.
1934 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1935 "device is too small"));
1936 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1941 * The new device has a different alignment requirement.
1943 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1944 "devices have different sector alignment"));
1945 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1950 * The resulting top-level vdev spec won't fit in the label.
1952 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1956 (void) zpool_standard_error(hdl, errno, msg);
1963 * Detach the specified device.
1966 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1968 zfs_cmd_t zc = { 0 };
1971 boolean_t avail_spare, l2cache;
1972 libzfs_handle_t *hdl = zhp->zpool_hdl;
1974 (void) snprintf(msg, sizeof (msg),
1975 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1977 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1978 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1980 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1983 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1986 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1988 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1990 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1997 * Can't detach from this type of vdev.
1999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2000 "applicable to mirror and replacing vdevs"));
2001 (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2006 * There are no other replicas of this device.
2008 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2012 (void) zpool_standard_error(hdl, errno, msg);
2019 * Remove the given device. Currently, this is supported only for hot spares
2020 * and level 2 cache devices.
2023 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2025 zfs_cmd_t zc = { 0 };
2028 boolean_t avail_spare, l2cache;
2029 libzfs_handle_t *hdl = zhp->zpool_hdl;
2031 (void) snprintf(msg, sizeof (msg),
2032 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2034 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2035 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2037 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2039 if (!avail_spare && !l2cache) {
2040 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2041 "only inactive hot spares or cache devices "
2043 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2046 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2048 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2051 return (zpool_standard_error(hdl, errno, msg));
2055 * Clear the errors for the pool, or the particular device if specified.
2058 zpool_clear(zpool_handle_t *zhp, const char *path)
2060 zfs_cmd_t zc = { 0 };
2063 boolean_t avail_spare, l2cache;
2064 libzfs_handle_t *hdl = zhp->zpool_hdl;
2067 (void) snprintf(msg, sizeof (msg),
2068 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2071 (void) snprintf(msg, sizeof (msg),
2072 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2075 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2077 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2078 &l2cache, NULL)) == 0)
2079 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2082 * Don't allow error clearing for hot spares. Do allow
2083 * error clearing for l2cache devices.
2086 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2088 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2092 if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2095 return (zpool_standard_error(hdl, errno, msg));
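/*
 * Illustrative sketch (not part of the original file): passing a NULL
 * path clears error counts for every device in the pool.
 */
static int
example_clear_pool(zpool_handle_t *zhp)
{
        return (zpool_clear(zhp, NULL));
}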
2099 * Similar to zpool_clear(), but takes a GUID (used by fmd).
2102 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2104 zfs_cmd_t zc = { 0 };
2106 libzfs_handle_t *hdl = zhp->zpool_hdl;
2108 (void) snprintf(msg, sizeof (msg),
2109 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2112 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2115 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2118 return (zpool_standard_error(hdl, errno, msg));
2122 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
2126 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
2129 libzfs_handle_t *hdl = zhp->zpool_hdl;
2130 char (*paths)[MAXPATHLEN];
2132 int curr, fd, base, ret = 0;
2137 if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
2138 return (errno == ENOENT ? 0 : -1);
2140 if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
2143 return (err == ENOENT ? 0 : -1);
2147 * Oddly this wasn't a directory -- ignore that failure since we
2148 * know there are no links lower in the (non-existent) hierarchy.
2150 if (!S_ISDIR(st.st_mode)) {
2155 if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
2160 (void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
2164 if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
2167 if (S_ISDIR(st.st_mode)) {
2168 if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
2171 if ((dirp = fdopendir(fd)) == NULL) {
2176 while ((dp = readdir(dirp)) != NULL) {
2177 if (dp->d_name[0] == '.')
2180 if (curr + 1 == size) {
2181 paths = zfs_realloc(hdl, paths,
2182 size * sizeof (paths[0]),
2183 size * 2 * sizeof (paths[0]));
2184 if (paths == NULL) {
2185 (void) closedir(dirp);
2193 (void) strlcpy(paths[curr + 1], paths[curr],
2194 sizeof (paths[curr + 1]));
2195 (void) strlcat(paths[curr], "/",
2196 sizeof (paths[curr]));
2197 (void) strlcat(paths[curr], dp->d_name,
2198 sizeof (paths[curr]));
2202 (void) closedir(dirp);
2205 if ((ret = cb(paths[curr], data)) != 0)
2223 typedef struct zvol_cb {
2224 zpool_handle_t *zcb_pool;
2225 boolean_t zcb_create;
2230 do_zvol_create(zfs_handle_t *zhp, void *data)
2234 if (ZFS_IS_VOLUME(zhp)) {
2235 (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
2236 ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
2240 ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
2248 * Iterate over all zvols in the pool and make any necessary minor nodes.
2251 zpool_create_zvol_links(zpool_handle_t *zhp)
2257 * If the pool is unavailable, just return success.
2259 if ((zfp = make_dataset_handle(zhp->zpool_hdl,
2260 zhp->zpool_name)) == NULL)
2263 ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2270 do_zvol_remove(const char *dataset, void *data)
2272 zpool_handle_t *zhp = data;
2274 return (zvol_remove_link(zhp->zpool_hdl, dataset));
2278 * Iterate over all zvols in the pool and remove any minor nodes. We iterate
2279 * by examining the /dev links so that a corrupted pool doesn't impede this
2283 zpool_remove_zvol_links(zpool_handle_t *zhp)
2285 return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2289 * Convert from a devid string to a path.
2292 devid_to_path(char *devid_str)
2297 devid_nmlist_t *list = NULL;
2300 if (devid_str_decode(devid_str, &devid, &minor) != 0)
2303 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2305 devid_str_free(minor);
2311 if ((path = strdup(list[0].devname)) == NULL)
2314 devid_free_nmlist(list);
2320 * Convert from a path to a devid string.
2323 path_to_devid(const char *path)
2329 if ((fd = open(path, O_RDONLY)) < 0)
2334 if (devid_get(fd, &devid) == 0) {
2335 if (devid_get_minor_name(fd, &minor) == 0)
2336 ret = devid_str_encode(devid, minor);
2338 devid_str_free(minor);
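/*
 * Illustrative sketch (not part of the original file): a devid
 * round-trip built from the two helpers above; this is the basis of
 * the stale-path detection in zpool_vdev_name() below.
 */
static void
example_devid_roundtrip(const char *path)
{
        char *devid, *newpath;

        if ((devid = path_to_devid(path)) == NULL)
                return;
        if ((newpath = devid_to_path(devid)) != NULL) {
                (void) printf("%s -> %s\n", path, newpath);
                free(newpath);
        }
        devid_str_free(devid);
}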
2347 * Issue the necessary ioctl() to update the stored path value for the vdev. We
2348 * ignore any failure here, since a common case is for an unprivileged user to
2349 * type 'zpool status', and we'll display the correct information anyway.
2352 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2354 zfs_cmd_t zc = { 0 };
2356 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2357 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2358 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2361 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2365 * Given a vdev, return the name to display in iostat. If the vdev has a path,
2366 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2367 * We also check if this is a whole disk, in which case we strip off the
2368 * trailing 's0' slice name.
2370 * This routine is also responsible for identifying when disks have been
2371 * reconfigured in a new location. The kernel will have opened the device by
2372 * devid, but the path will still refer to the old location. To catch this, we
2373 * first do a path -> devid translation (which is fast for the common case). If
2374 * the devid matches, we're done. If not, we do a reverse devid -> path
2375 * translation and issue the appropriate ioctl() to update the path of the vdev.
2376 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2380 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2388 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2390 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2392 (void) snprintf(buf, sizeof (buf), "%llu",
2393 (u_longlong_t)value);
2395 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2398 * If the device is dead (faulted, offline, etc) then don't
2399 * bother opening it. Otherwise we may be forcing the user to
2400 * open a misbehaving device, which can have undesirable
2403 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2404 (uint64_t **)&vs, &vsc) != 0 ||
2405 vs->vs_state >= VDEV_STATE_DEGRADED) &&
2407 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2409 * Determine if the current path is correct.
2411 char *newdevid = path_to_devid(path);
2413 if (newdevid == NULL ||
2414 strcmp(devid, newdevid) != 0) {
2417 if ((newpath = devid_to_path(devid)) != NULL) {
2419 * Update the path appropriately.
2421 set_path(zhp, nv, newpath);
2422 if (nvlist_add_string(nv,
2423 ZPOOL_CONFIG_PATH, newpath) == 0)
2424 verify(nvlist_lookup_string(nv,
2432 devid_str_free(newdevid);
2435 if (strncmp(path, "/dev/dsk/", 9) == 0)
2438 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2439 &value) == 0 && value) {
2440 char *tmp = zfs_strdup(hdl, path);
2443 tmp[strlen(path) - 2] = '\0';
2447 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2450 * If it's a raidz device, we need to stick in the parity level.
2452 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2453 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2455 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
2456 (u_longlong_t)value);
2461 return (zfs_strdup(hdl, path));
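/*
 * Illustrative sketch (not part of the original file): printing the
 * display name of each top-level vdev, the way 'zpool status' renders
 * the configuration section.
 */
static void
example_print_vdev_names(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
        nvlist_t *nvroot, **child;
        uint_t c, children;
        char *name;

        verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return;
        for (c = 0; c < children; c++) {
                name = zpool_vdev_name(hdl, zhp, child[c]);
                (void) printf("\t%s\n", name);
                free(name);
        }
}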
2465 zbookmark_compare(const void *a, const void *b)
2467 return (memcmp(a, b, sizeof (zbookmark_t)));
2471 * Retrieve the persistent error log, uniquify the members, and return to the
2475 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2477 zfs_cmd_t zc = { 0 };
2479 zbookmark_t *zb = NULL;
2483 * Retrieve the raw error list from the kernel. If the number of errors
2484 * has increased, allocate more space and continue until we get the
2487 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2491 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2492 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2494 zc.zc_nvlist_dst_size = count;
2495 (void) strcpy(zc.zc_name, zhp->zpool_name);
2497 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2499 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2500 if (errno == ENOMEM) {
2501 count = zc.zc_nvlist_dst_size;
2502 if ((zc.zc_nvlist_dst = (uintptr_t)
2503 zfs_alloc(zhp->zpool_hdl, count *
2504 sizeof (zbookmark_t))) == (uintptr_t)NULL)
2515 * Sort the resulting bookmarks. This is a little confusing due to the
2516 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
2517 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2518 * _not_ copied as part of the process. So we point the start of our
2519 * array appropriately and decrement the total number of elements.
2521 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2522 zc.zc_nvlist_dst_size;
2523 count -= zc.zc_nvlist_dst_size;
2525 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2527 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2530 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2532 for (i = 0; i < count; i++) {
2535 /* ignoring zb_blkid and zb_level for now */
2536 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2537 zb[i-1].zb_object == zb[i].zb_object)
2540 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2542 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2543 zb[i].zb_objset) != 0) {
2547 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2548 zb[i].zb_object) != 0) {
2552 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2559 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2563 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2564 return (no_memory(zhp->zpool_hdl));
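/*
 * Illustrative sketch (not part of the original file): walking the
 * uniquified error list and resolving each entry to a path with
 * zpool_obj_to_path() (defined later in this file).
 */
static void
example_print_errlog(zpool_handle_t *zhp)
{
        nvlist_t *nverrlist = NULL;
        nvpair_t *elem = NULL;
        nvlist_t *nv;
        uint64_t dsobj, obj;
        char pathname[MAXPATHLEN];

        if (zpool_get_errlog(zhp, &nverrlist) != 0)
                return;
        while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
                verify(nvpair_value_nvlist(elem, &nv) == 0);
                verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
                    &dsobj) == 0);
                verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
                    &obj) == 0);
                zpool_obj_to_path(zhp, dsobj, obj, pathname,
                    sizeof (pathname));
                (void) printf("%s\n", pathname);
        }
        nvlist_free(nverrlist);
}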
2568 * Upgrade a ZFS pool to the latest on-disk version.
2571 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2573 zfs_cmd_t zc = { 0 };
2574 libzfs_handle_t *hdl = zhp->zpool_hdl;
2576 (void) strcpy(zc.zc_name, zhp->zpool_name);
2577 zc.zc_cookie = new_version;
2579 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2580 return (zpool_standard_error_fmt(hdl, errno,
2581 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2587 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2592 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2593 for (i = 1; i < argc; i++) {
2594 if (strlen(history_str) + 1 + strlen(argv[i]) >
2597 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2598 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2603 * Stage command history for logging.
2606 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2608 if (history_str == NULL)
2611 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2614 if (hdl->libzfs_log_str != NULL)
2615 free(hdl->libzfs_log_str);
2617 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2618 return (no_memory(hdl));
2624 * Perform ioctl to get some command history of a pool.
2626 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
2627 * logical offset of the history buffer to start reading from.
2629 * Upon return, 'off' is the next logical offset to read from and
2630 * 'len' is the actual amount of bytes read into 'buf'.
2633 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2635 zfs_cmd_t zc = { 0 };
2636 libzfs_handle_t *hdl = zhp->zpool_hdl;
2638 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2640 zc.zc_history = (uint64_t)(uintptr_t)buf;
2641 zc.zc_history_len = *len;
2642 zc.zc_history_offset = *off;
2644 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2647 return (zfs_error_fmt(hdl, EZFS_PERM,
2648 dgettext(TEXT_DOMAIN,
2649 "cannot show history for pool '%s'"),
2652 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2653 dgettext(TEXT_DOMAIN, "cannot get history for pool "
2654 "'%s'"), zhp->zpool_name));
2656 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2657 dgettext(TEXT_DOMAIN, "cannot get history for pool "
2658 "'%s', pool must be upgraded"), zhp->zpool_name));
2660 return (zpool_standard_error_fmt(hdl, errno,
2661 dgettext(TEXT_DOMAIN,
2662 "cannot get history for '%s'"), zhp->zpool_name));
2666 *len = zc.zc_history_len;
2667 *off = zc.zc_history_offset;
2673 * Process the buffer of nvlists, unpacking and storing each nvlist record
2674 * into 'records'. 'leftover' is set to the number of bytes that weren't
2675 * processed as there wasn't a complete record.
2678 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2679 nvlist_t ***records, uint_t *numrecords)
2685 while (bytes_read > sizeof (reclen)) {
2687 /* get length of packed record (stored as little endian) */
2688 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2689 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2691 if (bytes_read < sizeof (reclen) + reclen)
2695 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2697 bytes_read -= sizeof (reclen) + reclen;
2698 buf += sizeof (reclen) + reclen;
2700 /* add record to nvlist array */
2702 if (ISP2(*numrecords + 1)) {
2703 *records = realloc(*records,
2704 *numrecords * 2 * sizeof (nvlist_t *));
2706 (*records)[*numrecords - 1] = nv;
2709 *leftover = bytes_read;
2713 #define HIS_BUF_LEN (128*1024)
2716 * Retrieve the command history of a pool.
2719 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2721 char buf[HIS_BUF_LEN];
2723 nvlist_t **records = NULL;
2724 uint_t numrecords = 0;
2728 uint64_t bytes_read = sizeof (buf);
2731 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2734 /* if nothing else was read in, we're at EOF, just return */
2738 if ((err = zpool_history_unpack(buf, bytes_read,
2739 &leftover, &records, &numrecords)) != 0)
2747 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2748 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2749 records, numrecords) == 0);
2751 for (i = 0; i < numrecords; i++)
2752 nvlist_free(records[i]);
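/*
 * Illustrative sketch (not part of the original file): dumping the
 * command line of each history record, as 'zpool history' does.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
        nvlist_t *nvhis, **records;
        uint_t numrecords, i;
        char *cmd;

        if (zpool_get_history(zhp, &nvhis) != 0)
                return;
        verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
            &records, &numrecords) == 0);
        for (i = 0; i < numrecords; i++) {
                if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
                    &cmd) == 0)
                        (void) printf("%s\n", cmd);
        }
        nvlist_free(nvhis);
}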
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	free(mntpnt);
}

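/*
 * Illustrative sketch (not part of libzfs): zpool_obj_to_path() is the
 * sort of call 'zpool status -v' makes to turn a <dataset obj, object>
 * pair from the persistent error log into a user-visible path.  The
 * object numbers below are made up for illustration.
 */
#if 0
static void
show_error_path(zpool_handle_t *zhp)
{
	char pathname[MAXPATHLEN];

	zpool_obj_to_path(zhp, 21, 5, pathname, sizeof (pathname));
	(void) printf("errors: permanent error in %s\n", pathname);
}
#endif
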
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

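/*
 * (Worked example: with 512-byte sectors, a 128K stripe is
 * 128 * 1024 / 512 = 256 sectors, so starting slice 0 at block 256
 * keeps it aligned to the stripe width.)
 */
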
/*
 * Read the EFI label from the config; if a label does not exist, pass
 * back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}

/*
 * Determine where a partition starts on a disk in the current
 * configuration.
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}

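/*
 * Example (illustrative, not from the original source): for a pool whose
 * root vdev is a mirror of two whole disks, find_start_block() recurses
 * into the first child, read_efi_label() reads that disk's label, and
 * the start of slice 0 is returned.  Only if no child has a readable
 * EFI label does the function return MAXOFFSET_T, in which case
 * zpool_label_disk() below falls back to NEW_START_BLOCK.
 */
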
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a
		 * specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}

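/*
 * Sketch of the label this function writes, assuming 512-byte sectors
 * (sizes vary with the device; this diagram is illustrative only):
 *
 *	LBA 0 .. start_block-1		GPT header/table + alignment gap
 *	slice 0 (V_USR, "zfs")		start_block .. efi_last_u_lba - resv
 *	slice 8 (V_RESERVED)		the last EFI_MIN_RESV_SIZE usable
 *					sectors, per the Solaris EFI
 *					convention
 */
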
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Check whether this zvol is allowable for use as a dump device; returns
 * zero if it is, > 0 if it isn't, < 0 if it isn't a zvol.
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		return (1);
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		return (1);
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {