/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#if defined(__i386) || defined(__amd64)
#define	BOOTCMD	"installgrub(1M)"
#else
#define	BOOTCMD	"installboot(1M)"
#endif

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

zpool_get_all_props(zpool_handle_t *zhp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
			zcmd_free_nvlists(&zc);

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);

	zcmd_free_nvlists(&zc);
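/*
 * The loop above is the standard libzfs pattern for ioctls with
 * variable-sized nvlist results: on ENOMEM the destination buffer is
 * grown via zcmd_expand_dst_nvlist() and the ioctl is retried until the
 * packed property list fits.
 */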
zpool_props_refresh(zpool_handle_t *zhp)
	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)

	nvlist_free(old_props);

zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)

zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		return (zpool_prop_default_numeric(prop));

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);

/*
 * Map VDEV STATE to printed strings.
 */
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	return (gettext("UNKNOWN"));
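/*
 * Note the mapping above is deliberately asymmetric: CLOSED and OFFLINE
 * both print as "OFFLINE", and CANT_OPEN is reported as "FAULTED" only
 * for corrupt data or a bad intent log; any other open failure prints
 * as "UNAVAIL".
 */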
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				    zpool_get_prop_string(zhp, prop, &src),
			(void) strlcpy(buf, "-", len);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_USED:
		case ZPOOL_PROP_AVAILABLE:
			(void) zfs_nicenum(intval, buf, len);
		case ZPOOL_PROP_CAPACITY:
			(void) snprintf(buf, len, "%llu%%",
			    (u_longlong_t)intval);
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
			(void) strlcpy(buf, zpool_state_to_name(intval,
			(void) snprintf(buf, len, "%llu", intval);
	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		(void) strlcpy(buf, strval, len);
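/*
 * Illustrative use of zpool_get_prop() (a sketch, not part of this
 * library; "tank" is a hypothetical pool and g_zfs an assumed handle
 * obtained from libzfs_init()):
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(g_zfs, "tank")) != NULL) {
 *		if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *		    sizeof (buf), &src) == 0)
 *			(void) printf("health: %s\n", buf);
 *		zpool_close(zhp);
 *	}
 */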
/*
 * Check that the bootfs name begins with the name of the pool it is being
 * set on. Assumes bootfs is a valid dataset name.
 */
bootfs_name_valid(const char *pool, char *bootfs)
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
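/*
 * For example, with pool "tank", both "tank" and "tank/boot/root" pass
 * the check above, while "tanker/root" does not: the prefix match alone
 * is not enough, the character after the pool name must be '/' or the
 * end of the string.
 */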
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
pool_uses_efi(nvlist_t *config)
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))

pool_is_bootable(zpool_handle_t *zhp)
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
	struct stat64 statbuf;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);

	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)

		/*
		 * Perform additional checking for specific properties.
		 */
		case ZPOOL_PROP_VERSION:
			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

		case ZPOOL_PROP_BOOTFS:
			if (create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to be in the pool it is set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);

			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);

		case ZPOOL_PROP_ALTROOT:
			if (!create_or_import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')

			if (strcmp(strval, "none") == 0)

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

	nvlist_free(retprops);

/*
 * Set zpool property : propname=propval.
 */
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
	zfs_cmd_t zc = { 0 };
	nvlist_t *nvl = NULL;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		return (no_memory(zhp->zpool_hdl));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);

		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

		(void) zpool_props_refresh(zhp);
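/*
 * Illustrative call sequence (a sketch; the handle setup and the pool
 * name "tank" are hypothetical, not part of this file):
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *	if (zhp != NULL) {
 *		(void) zpool_set_prop(zhp, "autoreplace", "on");
 *		zpool_close(zhp);
 *	}
 *
 * The string value is validated and converted by zpool_valid_proplist()
 * before the ZFS_IOC_POOL_SET_PROPS ioctl is issued.
 */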
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char buf[ZFS_MAXPROPLEN];

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256
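/*
 * 256 blocks of 512 bytes is exactly 128K (256 * 512 = 131072), so a
 * slice starting at NEW_START_BLOCK lines up with a 128k stripe width.
 */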
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		    dgettext(TEXT_DOMAIN, "name is reserved"));

		case NAME_ERR_TOOLONG:
			    dgettext(TEXT_DOMAIN, "name is too long"));

		case NAME_ERR_INVALCHAR:
			    dgettext(TEXT_DOMAIN, "invalid character "
			    "'%c' in pool name"), what);

		case NAME_ERR_NOLETTER:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name must begin with a letter"));

		case NAME_ERR_RESERVED:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name is reserved"));

		case NAME_ERR_DISKLIKE:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool name is reserved"));

		case NAME_ERR_LEADING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "leading slash in name"));

		case NAME_ERR_EMPTY_COMPONENT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "empty component in name"));

		case NAME_ERR_TRAILING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "trailing slash in name"));

		case NAME_ERR_MULTIPLE_AT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "multiple '@' delimiters in name"));

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_open(libzfs_handle_t *hdl, const char *pool)
	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
zpool_close(zpool_handle_t *zhp)
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);

/*
 * Return the name of the pool.
 */
zpool_get_name(zpool_handle_t *zhp)
	return (zhp->zpool_name);

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
zpool_get_state(zpool_handle_t *zhp)
	return (zhp->zpool_state);
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

	if ((zc_props = zpool_valid_proplist(hdl, pool, props,
	    SPA_VERSION_1, B_TRUE, msg)) == NULL) {

	zoned = ((nvlist_lookup_string(fsprops,
	    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
	    strcmp(zonestr, "on") == 0);

	if ((zc_fsprops = zfs_valid_proplist(hdl,
	    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
	    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
	if (nvlist_add_nvlist(zc_props,
	    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is less than the "
			    "minimum size (%s)"), buf);

			return (zfs_error(hdl, EZFS_BADDEV, msg));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

			return (zpool_standard_error(hdl, errno, msg));

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),

	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
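/*
 * Note that zc_props and zc_fsprops are freed on both the error and the
 * success paths above; nvlist_free() is assumed to tolerate the NULL
 * pointers left behind when the optional property lists were never
 * built.
 */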
/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
zpool_destroy(zpool_handle_t *zhp)
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)

	if (zpool_remove_zvol_links(zhp) != 0)

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			(void) zpool_standard_error(hdl, errno, msg);

		remove_mountpoint(zfp);

/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {

		for (s = 0; s < nspares; s++) {

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s]));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);

			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "device is less than the minimum "
			(void) zfs_error(hdl, EZFS_BADDEV, msg);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool cannot have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);

			(void) zpool_standard_error(hdl, errno, msg);

	zcmd_free_nvlists(&zc);

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,

		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,

zpool_export(zpool_handle_t *zhp, boolean_t force)
	return (zpool_export_common(zhp, force, B_FALSE));

zpool_export_force(zpool_handle_t *zhp)
	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
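/*
 * The two wrappers above encode the export mode in the ioctl arguments:
 * zc_cookie carries 'force' and zc_guid is overloaded to carry
 * 'hardforce', so a hard-forced export always implies a forced one.
 */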
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
	nvlist_t *props = NULL;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, boolean_t importfaulted)
	zfs_cmd_t zc = { 0 };

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
		thename = (char *)newname;

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, B_TRUE, errbuf)) == NULL) {
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {

	zc.zc_cookie = (uint64_t)importfaulted;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);

			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);

			(void) zpool_standard_error(hdl, errno, desc);

		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);

	zcmd_free_nvlists(&zc);

zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

		return (zfs_error(hdl, EZFS_RESILVERING, msg));

	return (zpool_standard_error(hdl, errno, msg));
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64: {
		uint64_t srchval, theguid, present;

		verify(nvpair_value_uint64(pair, &srchval) == 0);
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
				/*
				 * If the device has never been present since
				 * import, the only reliable way to match the
				 * vdev is by GUID.
				 */
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_GUID, &theguid) == 0);
				if (theguid == srchval)

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)

		/*
		 * Search for the requested value. We special case the search
		 * for ZPOOL_CONFIG_PATH when it's a wholedisk. Otherwise,
		 * all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,

				/*
				 * For whole disks, the internal path has 's0',
				 * but the path passed in by the user doesn't.
				 */
				if (strlen(srchval) == strlen(val) - 2 &&
				    strncmp(srchval, val, strlen(srchval)) == 0)

		if (strcmp(srchval, val) == 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	char buf[MAXPATHLEN];
	nvlist_t *nvroot, *search, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;

	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);
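/*
 * The parsing above means three spellings can name the same leaf vdev: a
 * numeric guid ("123456789"), a short device name ("c1t0d0", which is
 * expanded to "/dev/dsk/c1t0d0"), or an absolute path used verbatim.
 */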
vdev_online(nvlist_t *nv)
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)

/*
 * Helper function for zpool_get_physpaths().
 */
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
	size_t bytes_left, pos, rsz;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
		return (EZFS_NOSPC);
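/*
 * Truncation is detected through the snprintf() return value: snprintf()
 * returns the length the formatted string would have required, so
 * rsz >= bytes_left means the physpath buffer was too small and
 * EZFS_NOSPC is returned rather than a silently truncated path.
 */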
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare.
		 */
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
				return (EZFS_INVALCONFIG);

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)

	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)

	return (EZFS_POOL_INVALARG);

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
	nvlist_t *vdev_root;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,

	/* No online devices */
		return (EZFS_NODEVICE);

/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,

/*
 * Returns TRUE if the given guid corresponds to the given type.
 * This is used to check for hot spares (INUSE or not), and level 2 cache
 * devices.
 */
is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
	uint64_t target_guid;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
			    &target_guid) == 0);
			if (guid == target_guid)
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
	char path[MAXPATHLEN];
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 */
	error = _efi_use_whole_disk(fd);

	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));

/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));

			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);

	zc.zc_cookie = VDEV_STATE_ONLINE;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
/*
 * Take the specified vdev offline.
 */
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

		/*
		 * The log device has unplayed logs.
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

		return (zpool_standard_error(hdl, errno, msg));

/*
 * Mark the given vdev faulted.
 */
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	return (zpool_standard_error(hdl, errno, msg));

/*
 * Mark the given vdev degraded.
 */
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

	return (zpool_standard_error(hdl, errno, msg));

/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in
 * as a hot spare.
 */
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache, islog;
	char *path, *newname;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = pool_is_bootable(zhp);

		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, newname, &avail_spare,
	    &l2cache, NULL) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

			/*
			 * XXX - This should be removed once we can
			 * automatically install the bootblocks on the
			 * newly attached disk.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
			    "be sure to invoke %s to make '%s' bootable.\n"),

			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));

		/*
		 * Can't attach to or replace this type of vdev.
		 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a log with a spare"));
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a replacing device"));
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);

		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);

		(void) zpool_standard_error(hdl, errno, msg);
/*
 * Detach the specified device.
 */
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)

		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);

		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);

		(void) zpool_standard_error(hdl, errno, msg);

/*
 * Remove the given device. Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare && !l2cache) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares or cache devices "
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)

	return (zpool_standard_error(hdl, errno, msg));

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
zpool_clear(zpool_handle_t *zhp, const char *path)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares. Do allow
		 * error clearing for l2cache devices.
		 */
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)

	return (zpool_standard_error(hdl, errno, msg));

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)

	return (zpool_standard_error(hdl, errno, msg));

/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.
 */
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	int curr, fd, base, ret = 0;

	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
		return (err == ENOENT ? 0 : -1);

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));

		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)

		if (S_ISDIR(st.st_mode)) {
			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)

			if ((dirp = fdopendir(fd)) == NULL) {

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')

				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);

				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));

			(void) closedir(dirp);

		if ((ret = cb(paths[curr], data)) != 0)
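/*
 * The walk above is an iterative depth-first traversal: 'paths' acts as
 * an explicit stack of pending names, grown with zfs_realloc() as
 * needed, so no recursion is required to descend the /dev/zvol/dsk
 * hierarchy.
 */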
typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;
	boolean_t zcb_create;

do_zvol_create(zfs_handle_t *zhp, void *data)
	if (ZFS_IS_VOLUME(zhp)) {
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);

	ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);

/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
zpool_create_zvol_links(zpool_handle_t *zhp)
	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)

	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);

do_zvol_remove(const char *dataset, void *data)
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));

/*
 * Iterate over all zvols in the pool and remove any minor nodes. We iterate
 * by examining the /dev links so that a corrupted pool doesn't impede this
 * operation.
 */
zpool_remove_zvol_links(zpool_handle_t *zhp)
	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));

/*
 * Convert from a devid string to a path.
 */
devid_to_path(char *devid_str)
	devid_nmlist_t *list = NULL;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);

	if ((path = strdup(list[0].devname)) == NULL)

	devid_free_nmlist(list);

/*
 * Convert from a path to a devid string.
 */
path_to_devid(const char *path)
	if ((fd = open(path, O_RDONLY)) < 0)

	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
			devid_str_free(minor);

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev. We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);

/*
 * Given a vdev, return the name to display in iostat. If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location. The kernel will have opened the device by
 * devid, but the path will still refer to the old location. To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done. If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev. If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it. Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * side effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,

				devid_str_free(newdevid);

		if (strncmp(path, "/dev/dsk/", 9) == 0)

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);
			tmp[strlen(path) - 2] = '\0';

		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);

	return (zfs_strdup(hdl, path));

zbookmark_compare(const void *a, const void *b)
	return (memcmp(a, b, sizeof (zbookmark_t)));
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
	zfs_cmd_t zc = { 0 };
	zbookmark_t *zb = NULL;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,

	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		free((void *)(uintptr_t)zc.zc_nvlist_dst);
		if (errno == ENOMEM) {
			count = zc.zc_nvlist_dst_size;
			if ((zc.zc_nvlist_dst = (uintptr_t)
			    zfs_alloc(zhp->zpool_hdl, count *
			    sizeof (zbookmark_t))) == (uintptr_t)NULL)

	/*
	 * Sort the resulting bookmarks. This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process. So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
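/*
 * Sorting first makes the dedup step below cheap: once the bookmarks are
 * ordered, duplicate <objset, object> pairs are always adjacent, so a
 * single comparison against the previous entry is enough to skip them.
 */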
2752 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2755 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2757 for (i = 0; i < count; i++) {
2758 nvlist_t *nv;
2760 /* ignoring zb_blkid and zb_level for now */
2761 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2762 zb[i-1].zb_object == zb[i].zb_object)
2763 continue;
2765 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2766 goto nomem;
2767 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2768 zb[i].zb_objset) != 0) {
2769 nvlist_free(nv);
2770 goto nomem;
2771 }
2772 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2773 zb[i].zb_object) != 0) {
2774 nvlist_free(nv);
2775 goto nomem;
2776 }
2777 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2778 nvlist_free(nv);
2779 goto nomem;
2780 }
2781 nvlist_free(nv);
2782 }
2784 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2785 return (0);
2787 nomem:
2788 free((void *)(uintptr_t)zc.zc_nvlist_dst);
2789 return (no_memory(zhp->zpool_hdl));
2790 }
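/*
 * Usage sketch (illustrative only): walk the uniquified error list and
 * print each <dataset>:<object> pair.  print_errlog() is a hypothetical
 * helper; callers iterate with nvlist_next_nvpair() since the "ejk" pair
 * name above is an arbitrary placeholder.
 */
static void
print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist, *nv;
	nvpair_t *elem = NULL;
	uint64_t dsobj, obj;

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		(void) printf("<0x%llx>:<0x%llx>\n", (u_longlong_t)dsobj,
		    (u_longlong_t)obj);
	}
	nvlist_free(nverrlist);
}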
2792 /*
2793 * Upgrade a ZFS pool to the given on-disk version.
2794 */
2795 int
2796 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2798 zfs_cmd_t zc = { 0 };
2799 libzfs_handle_t *hdl = zhp->zpool_hdl;
2801 (void) strcpy(zc.zc_name, zhp->zpool_name);
2802 zc.zc_cookie = new_version;
2804 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2805 return (zpool_standard_error_fmt(hdl, errno,
2806 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2807 zhp->zpool_name));
2809 return (0);
2810 }
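/*
 * Usage sketch (illustrative only): upgrade a pool to the newest version
 * this build understands.  upgrade_to_latest() is a hypothetical helper;
 * SPA_VERSION is assumed to be the library's current on-disk version
 * constant.
 */
static int
upgrade_to_latest(zpool_handle_t *zhp)
{
	return (zpool_upgrade(zhp, SPA_VERSION));
}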
2811 void
2812 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2813 char *history_str)
2815 int i;
2817 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2818 for (i = 1; i < argc; i++) {
2819 if (strlen(history_str) + 1 + strlen(argv[i]) >
2820 HIS_MAX_RECORD_LEN)
2821 break;
2822 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2823 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2828 * Stage command history for logging.
2831 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2833 if (history_str == NULL)
2834 return (EINVAL);
2836 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2837 return (EINVAL);
2839 if (hdl->libzfs_log_str != NULL)
2840 free(hdl->libzfs_log_str);
2842 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2843 return (no_memory(hdl));
2845 return (0);
2846 }
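/*
 * Usage sketch (illustrative only): build and stage a history string
 * before issuing a command, mirroring how the zpool(1M) command uses
 * these two functions.  log_command() is a hypothetical helper.
 */
static int
log_command(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history_str[HIS_MAX_RECORD_LEN];

	zpool_set_history_str("zpool", argc, argv, history_str);
	return (zpool_stage_history(hdl, history_str));
}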
2849 * Perform ioctl to get some command history of a pool.
2851 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
2852 * logical offset of the history buffer to start reading from.
2854 * Upon return, 'off' is the next logical offset to read from and
2855 * 'len' is the actual number of bytes read into 'buf'.
2856 */
2857 static int
2858 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2860 zfs_cmd_t zc = { 0 };
2861 libzfs_handle_t *hdl = zhp->zpool_hdl;
2863 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2865 zc.zc_history = (uint64_t)(uintptr_t)buf;
2866 zc.zc_history_len = *len;
2867 zc.zc_history_offset = *off;
2869 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2870 switch (errno) {
2871 case EPERM:
2872 return (zfs_error_fmt(hdl, EZFS_PERM,
2873 dgettext(TEXT_DOMAIN,
2874 "cannot show history for pool '%s'"),
2875 zhp->zpool_name));
2876 case ENOENT:
2877 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2878 dgettext(TEXT_DOMAIN, "cannot get history for pool "
2879 "'%s'"), zhp->zpool_name));
2880 case ENOTSUP:
2881 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2882 dgettext(TEXT_DOMAIN, "cannot get history for pool "
2883 "'%s', pool must be upgraded"), zhp->zpool_name));
2884 default:
2885 return (zpool_standard_error_fmt(hdl, errno,
2886 dgettext(TEXT_DOMAIN,
2887 "cannot get history for '%s'"), zhp->zpool_name));
2888 }
2889 }
2891 *len = zc.zc_history_len;
2892 *off = zc.zc_history_offset;
2894 return (0);
2895 }
2898 * Process the buffer of nvlists, unpacking and storing each nvlist record
2899 * into 'records'. 'leftover' is set to the number of bytes that weren't
2900 * processed as there wasn't a complete record.
2901 */
2902 static int
2903 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2904 nvlist_t ***records, uint_t *numrecords)
2906 uint64_t reclen;
2907 nvlist_t *nv;
2908 int i;
2910 while (bytes_read > sizeof (reclen)) {
2912 /* get length of packed record (stored as little endian) */
2913 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2914 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2916 if (bytes_read < sizeof (reclen) + reclen)
2917 break;
2919 /* unpack record */
2920 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2921 return (ENOMEM);
2922 bytes_read -= sizeof (reclen) + reclen;
2923 buf += sizeof (reclen) + reclen;
2925 /* add record to nvlist array */
2926 (*numrecords)++;
2927 if (ISP2(*numrecords + 1)) {
2928 *records = realloc(*records,
2929 *numrecords * 2 * sizeof (nvlist_t *));
2930 }
2931 (*records)[*numrecords - 1] = nv;
2932 }
2934 *leftover = bytes_read;
2935 return (0);
2936 }
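/*
 * Worked example (hypothetical bytes): a record packed as
 *   2c 00 00 00 00 00 00 00 <44 bytes of packed nvlist>
 * decodes to reclen == 0x2c == 44, so one iteration consumes 8 + 44
 * bytes and unpacks the 44-byte payload into an nvlist.
 */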
2938 #define HIS_BUF_LEN (128*1024)
2940 /*
2941 * Retrieve the command history of a pool.
2942 */
2943 int
2944 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2946 char buf[HIS_BUF_LEN];
2947 uint64_t off = 0;
2948 nvlist_t **records = NULL;
2949 uint_t numrecords = 0;
2950 int err, i;
2952 do {
2953 uint64_t bytes_read = sizeof (buf);
2954 uint64_t leftover;
2956 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2957 break;
2959 /* if nothing else was read in, we're at EOF, just return */
2960 if (!bytes_read)
2961 break;
2963 if ((err = zpool_history_unpack(buf, bytes_read,
2964 &leftover, &records, &numrecords)) != 0)
2965 break;
2966 off -= leftover;
2968 /* CONSTCOND */
2969 } while (1);
2971 if (!err) {
2972 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2973 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2974 records, numrecords) == 0);
2975 }
2976 for (i = 0; i < numrecords; i++)
2977 nvlist_free(records[i]);
2978 free(records);
2980 return (err);
2981 }
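/*
 * Usage sketch (illustrative only): fetch the history and print the
 * command string of each record.  print_history() is a hypothetical
 * helper; ZPOOL_HIST_CMD is absent for internal events, so that lookup
 * is allowed to fail.
 */
static void
print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t numrecords, i;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
}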
2984 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2985 char *pathname, size_t len)
2987 zfs_cmd_t zc = { 0 };
2988 boolean_t mounted = B_FALSE;
2989 char *mntpnt = NULL;
2990 char dsname[MAXNAMELEN];
2992 if (dsobj == 0) {
2993 /* special case for the MOS */
2994 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2995 return;
2996 }
2998 /* get the dataset's name */
2999 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3000 zc.zc_obj = dsobj;
3001 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3002 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3003 /* just write out a path of two object numbers */
3004 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3005 dsobj, obj);
3006 return;
3007 }
3008 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3010 /* find out if the dataset is mounted */
3011 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3013 /* get the corrupted object's path */
3014 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3015 zc.zc_obj = obj;
3016 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3017 &zc) == 0) {
3018 if (mounted) {
3019 (void) snprintf(pathname, len, "%s%s", mntpnt,
3020 zc.zc_value);
3021 } else {
3022 (void) snprintf(pathname, len, "%s:%s",
3023 dsname, zc.zc_value);
3024 }
3025 } else {
3026 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3027 }
3028 free(mntpnt);
3029 }
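/*
 * Usage sketch (illustrative only): resolve an errlog bookmark into a
 * human-readable path, as "zpool status -v" does.  print_error_path()
 * is a hypothetical helper.
 */
static void
print_error_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj)
{
	char pathname[MAXPATHLEN * 2];

	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
	(void) printf("%s\n", pathname);
}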
3031 /*
3032 * Read the EFI label from the config; if a label does not exist then
3033 * pass back the error to the caller. If the caller has passed a non-NULL
3034 * diskaddr argument then we set it to the starting address of the EFI
3035 * partition.
3036 */
3037 static int
3038 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3040 char *path;
3041 int fd;
3042 char diskname[MAXPATHLEN];
3043 int err = -1;
3045 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3046 return (err);
3048 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3049 strrchr(path, '/'));
3050 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3051 struct dk_gpt *vtoc;
3053 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3054 if (sb != NULL)
3055 *sb = vtoc->efi_parts[0].p_start;
3056 efi_free(vtoc);
3057 }
3058 (void) close(fd);
3059 }
3060 return (err);
3061 }
3063 /*
3064 * determine where a partition starts on a disk in the current
3065 * configuration
3066 */
3067 static diskaddr_t
3068 find_start_block(nvlist_t *config)
3070 nvlist_t **child;
3071 uint_t c, children;
3072 diskaddr_t sb = MAXOFFSET_T;
3073 uint64_t wholedisk;
3075 if (nvlist_lookup_nvlist_array(config,
3076 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3077 if (nvlist_lookup_uint64(config,
3078 ZPOOL_CONFIG_WHOLE_DISK,
3079 &wholedisk) != 0 || !wholedisk) {
3080 return (MAXOFFSET_T);
3082 if (read_efi_label(config, &sb) < 0)
3083 sb = MAXOFFSET_T;
3084 return (sb);
3085 }
3087 for (c = 0; c < children; c++) {
3088 sb = find_start_block(child[c]);
3089 if (sb != MAXOFFSET_T) {
3090 return (sb);
3091 }
3092 }
3093 return (MAXOFFSET_T);
3094 }
3097 * Label an individual disk. The name provided is the short name,
3098 * stripped of any leading /dev path.
3099 */
3100 int
3101 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3103 char path[MAXPATHLEN];
3104 struct dk_gpt *vtoc;
3105 int fd;
3106 size_t resv = EFI_MIN_RESV_SIZE;
3107 uint64_t slice_size;
3108 diskaddr_t start_block;
3109 char errbuf[1024];
3111 /* prepare an error message just in case */
3112 (void) snprintf(errbuf, sizeof (errbuf),
3113 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3118 if (pool_is_bootable(zhp)) {
3119 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3120 "EFI labeled devices are not supported on root "
3122 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3125 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3126 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3128 if (zhp->zpool_start_block == 0)
3129 start_block = find_start_block(nvroot);
3130 else
3131 start_block = zhp->zpool_start_block;
3132 zhp->zpool_start_block = start_block;
3133 } else {
3134 /* new pool */
3135 start_block = NEW_START_BLOCK;
3136 }
3138 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3139 BACKUP_SLICE);
3141 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3143 * This shouldn't happen. We've long since verified that this
3144 * is a valid device.
3147 dgettext(TEXT_DOMAIN, "unable to open device"));
3148 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3151 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3153 * The only way this can fail is if we run out of memory, or we
3154 * were unable to read the disk's capacity
3156 if (errno == ENOMEM)
3157 (void) no_memory(hdl);
3159 (void) close(fd);
3160 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3161 "unable to read disk capacity"), name);
3163 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3166 slice_size = vtoc->efi_last_u_lba + 1;
3167 slice_size -= EFI_MIN_RESV_SIZE;
3168 if (start_block == MAXOFFSET_T)
3169 start_block = NEW_START_BLOCK;
3170 slice_size -= start_block;
3172 vtoc->efi_parts[0].p_start = start_block;
3173 vtoc->efi_parts[0].p_size = slice_size;
3176 * Why we use V_USR: V_BACKUP confuses users, and is considered
3177 * disposable by some EFI utilities (since EFI doesn't have a backup
3178 * slice). V_UNASSIGNED is supposed to be used only for zero size
3179 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3180 * etc. were all pretty specific. V_USR is as close to reality as we
3181 * can get, in the absence of V_OTHER.
3183 vtoc->efi_parts[0].p_tag = V_USR;
3184 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3186 vtoc->efi_parts[8].p_start = slice_size + start_block;
3187 vtoc->efi_parts[8].p_size = resv;
3188 vtoc->efi_parts[8].p_tag = V_RESERVED;
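/*
 * Worked example (hypothetical geometry, assuming NEW_START_BLOCK is 256
 * and EFI_MIN_RESV_SIZE is 16384 sectors): for a disk whose last usable
 * LBA is 16777215, slice_size becomes 16777216 - 16384 - 256 = 16760576.
 * Slice 0 then spans sectors 256..16760831, and slice 8 holds the
 * trailing 16384-sector reserve at 16760832..16777215.
 */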
3190 if (efi_write(fd, vtoc) != 0) {
3192 * Some block drivers (like pcata) may not support EFI
3193 * GPT labels. Print out a helpful error message directing
3194 * the user to manually label the disk and give a specific slice.
3195 */
3197 (void) close(fd);
3198 efi_free(vtoc);
3200 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3201 "try using fdisk(1M) and then provide a specific slice"));
3202 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3203 }
3205 (void) close(fd);
3206 efi_free(vtoc);
3207 return (0);
3208 }
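/*
 * Usage sketch (illustrative only): write an EFI label on a whole disk
 * before adding it to a pool.  label_example() is a hypothetical helper,
 * and the device name "c1t0d0" is an assumed example, given without any
 * leading /dev path.
 */
static int
label_example(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	return (zpool_label_disk(hdl, zhp, "c1t0d0"));
}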
3210 static boolean_t
3211 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3213 char *type;
3214 nvlist_t **child;
3215 uint_t children, c;
3217 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3218 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3219 strcmp(type, VDEV_TYPE_FILE) == 0 ||
3220 strcmp(type, VDEV_TYPE_LOG) == 0 ||
3221 strcmp(type, VDEV_TYPE_MISSING) == 0) {
3222 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3223 "vdev type '%s' is not supported"), type);
3224 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3225 return (B_FALSE);
3226 }
3227 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3228 &child, &children) == 0) {
3229 for (c = 0; c < children; c++) {
3230 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3231 return (B_FALSE);
3232 }
3233 }
3234 return (B_TRUE);
3235 }
3238 * check if this zvol is allowable for use as a dump device; zero if
3239 * it is, > 0 if it isn't, < 0 if it isn't a zvol
3240 */
3241 int
3242 zvol_check_dump_config(char *arg)
3244 zpool_handle_t *zhp = NULL;
3245 nvlist_t *config, *nvroot;
3246 char *p, *volname;
3247 nvlist_t **top;
3248 uint_t toplevels;
3249 libzfs_handle_t *hdl;
3250 char errbuf[1024];
3251 char poolname[ZPOOL_MAXNAMELEN];
3252 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3253 int ret = 1;
3255 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3256 return (-1);
3257 }
3259 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3260 "dump is not supported on device '%s'"), arg);
3262 if ((hdl = libzfs_init()) == NULL)
3263 return (1);
3264 libzfs_print_on_error(hdl, B_TRUE);
3266 volname = arg + pathlen;
3268 /* check the configuration of the pool */
3269 if ((p = strchr(volname, '/')) == NULL) {
3270 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3271 "malformed dataset name"));
3272 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3273 return (1);
3274 } else if (p - volname >= ZFS_MAXNAMELEN) {
3275 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3276 "dataset name is too long"));
3277 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3278 return (1);
3279 } else {
3280 (void) strncpy(poolname, volname, p - volname);
3281 poolname[p - volname] = '\0';
3282 }
3284 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3285 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3286 "could not open pool '%s'"), poolname);
3287 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3288 goto out;
3289 }
3290 config = zpool_get_config(zhp, NULL);
3291 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3292 &nvroot) != 0) {
3293 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3294 "could not obtain vdev configuration for '%s'"), poolname);
3295 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3296 goto out;
3297 }
3299 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3300 &top, &toplevels) == 0);
3301 if (toplevels != 1) {
3302 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3303 "'%s' has multiple top level vdevs"), poolname);
3304 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3305 goto out;
3306 }
3308 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {