 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Functions to convert between a list of vdevs and an nvlist representing the
 * configuration.  Each entry in the list can be one of:
 *
 *	disk=(path=..., devid=...)
 *
 * While the underlying implementation supports it, group vdevs cannot contain
 * other group vdevs.  All userland verification of devices is contained within
 * this file.  If successful, the nvlist returned can be passed directly to the
 * kernel; we've done as much verification as possible in userland.
 *
 * Hot spares are a special case, and passed down as an array of disk vdevs, at
 * the same level as the root of the vdev tree.
 *
 * The only function exported by this file is 'make_root_vdev'.  The
 * function performs several passes:
 *
 *	1. Construct the vdev specification.  Performs syntax validation and
 *	   makes sure each device is valid.
 *	2. Check for devices in use.  Using libdiskmgt, makes sure that none of
 *	   the devices are already in use.  Some can be overridden using the
 *	   'force' flag, others cannot.
 *	3. Check for replication errors if the 'force' flag is not specified.
 *	   This validates that the replication level is consistent across the
 *	   entire pool.
 *	4. Call libzfs to label any whole disks with an EFI label.
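 *
 * As a rough illustration (device names below are only examples), a request
 * such as
 *
 *	mirror c0t0d0s0 c0t1d0s0
 *
 * becomes an nvlist tree of the form
 *
 *	root
 *	    mirror
 *		disk (path=/dev/dsk/c0t0d0s0)
 *		disk (path=/dev/dsk/c0t1d0s0)
 *
 * built from the ZPOOL_CONFIG_TYPE, ZPOOL_CONFIG_PATH and
 * ZPOOL_CONFIG_CHILDREN keys used throughout this file.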
#include <libdiskmgt.h>
#include <libnvpair.h>
#include <sys/efi_partition.h>
#include <sys/mntent.h>

#include "zpool_util.h"

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"
 * For any given vdev specification, we can have multiple errors.  The
 * vdev_error() function keeps track of whether we have seen an error yet, and
 * prints out a header if it's the first error we've seen.
vdev_error(const char *fmt, ...)
	(void) fprintf(stderr, gettext("invalid vdev specification\n"));
	(void) fprintf(stderr, gettext("use '-f' to override "
	    "the following errors:\n"));
	(void) fprintf(stderr, gettext("the following errors "
	    "must be manually repaired:\n"));
	(void) vfprintf(stderr, fmt, ap);

libdiskmgt_error(int error)
	 * ENXIO/ENODEV is a valid error message if the device doesn't live in
	 * /dev/dsk.  Don't bother printing an error message in this case.
	if (error == ENXIO || error == ENODEV)

	(void) fprintf(stderr, gettext("warning: device in use checking "
	    "failed: %s\n"), strerror(error));

 * Validate a device, passing the bulk of the work off to libdiskmgt.
check_slice(const char *path, int force, boolean_t wholedisk, boolean_t isspare)
	who = DM_WHO_ZPOOL_FORCE;
	who = DM_WHO_ZPOOL_SPARE;

	if (dm_inuse((char *)path, &msg, who, &error) || error) {
		libdiskmgt_error(error);
		vdev_error("%s", msg);

	 * If we're given a whole disk, ignore overlapping slices since we're
	 * about to label it anyway.
	if (!wholedisk && !force &&
	    (dm_isoverlapping((char *)path, &msg, &error) || error)) {
		/* dm_isoverlapping returned -1 */
		vdev_error(gettext("%s overlaps with %s\n"), path, msg);
	} else if (error != ENODEV) {
		/* libdiskmgt's devcache only handles physical drives */
		libdiskmgt_error(error);

 * Validate a whole disk.  Iterate over all slices on the disk and make sure
 * that none is in use by calling check_slice().
check_disk(const char *name, dm_descriptor_t disk, int force, int isspare)
	dm_descriptor_t *drive, *media, *slice;

	 * Get the drive associated with this disk.  This should never fail,
	 * because we already have an alias handle open for the device.
	if ((drive = dm_get_associated_descriptors(disk, DM_DRIVE,
	    &err)) == NULL || *drive == NULL) {
		libdiskmgt_error(err);

	if ((media = dm_get_associated_descriptors(*drive, DM_MEDIA,
		dm_free_descriptors(drive);
		libdiskmgt_error(err);

	dm_free_descriptors(drive);

	 * It is possible that the user has specified a removable media drive,
	 * and the media is not present.
	if (*media == NULL) {
		dm_free_descriptors(media);
		vdev_error(gettext("'%s' has no media in drive\n"), name);

	if ((slice = dm_get_associated_descriptors(*media, DM_SLICE,
		dm_free_descriptors(media);
		libdiskmgt_error(err);

	dm_free_descriptors(media);

	 * Iterate over all slices and report any errors.  We don't care about
	 * overlapping slices because we are using the whole disk.
	for (i = 0; slice[i] != NULL; i++) {
		char *name = dm_get_name(slice[i], &err);

		if (check_slice(name, force, B_TRUE, isspare) != 0)

	dm_free_descriptors(slice);

check_device(const char *path, boolean_t force, boolean_t isspare)
	dm_descriptor_t desc;

	 * For whole disks, libdiskmgt does not include the leading dev path.
	dev = strrchr(path, '/');

	if ((desc = dm_get_descriptor_by_name(DM_ALIAS, dev, &err)) != NULL) {
		err = check_disk(path, desc, force, isspare);
		dm_free_descriptor(desc);

	return (check_slice(path, force, B_FALSE, isspare));

 * Check that a file is valid.  All we can do in this case is check that it's
 * not in use by another pool, and not in use by swap.
check_file(const char *file, boolean_t force, boolean_t isspare)
	if (dm_inuse_swap(file, &err)) {
		libdiskmgt_error(err);
		vdev_error(gettext("%s is currently used by swap. "
		    "Please see swap(1M).\n"), file);

	if ((fd = open(file, O_RDONLY)) < 0)

	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
		case POOL_STATE_ACTIVE:
			desc = gettext("active");
		case POOL_STATE_EXPORTED:
			desc = gettext("exported");
		case POOL_STATE_POTENTIALLY_ACTIVE:
			desc = gettext("potentially active");
			desc = gettext("unknown");

		 * Allow hot spares to be shared between pools.
		if (state == POOL_STATE_SPARE && isspare)

		if (state == POOL_STATE_ACTIVE ||
		    state == POOL_STATE_SPARE || !force) {
			case POOL_STATE_SPARE:
				vdev_error(gettext("%s is reserved as a hot "
				    "spare for pool %s\n"), file, name);
				vdev_error(gettext("%s is part of %s pool "
				    "'%s'\n"), file, desc, name);
 * By "whole disk" we mean an entire physical disk (something we can
 * label, toggle the write cache on, etc.) as opposed to the full
 * capacity of a pseudo-device such as lofi or did.  We act as if we
 * are labeling the disk, which should be a pretty good test of whether
 * it's a viable device or not.  Returns B_TRUE if it is and B_FALSE if
 * not.
is_whole_disk(const char *arg)
	struct dk_gpt *label;
	char path[MAXPATHLEN];

	(void) snprintf(path, sizeof (path), "%s%s%s",
	    RDISK_ROOT, strrchr(arg, '/'), BACKUP_SLICE);
	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0)
	if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
 * Create a leaf vdev.  Determine if this is a file or a device.  If it's a
 * device, fill in the device id to make a complete nvlist.  Valid forms for a
 * leaf vdev are:
 *
 *	/dev/dsk/xxx	Complete disk path
 *	/xxx		Full path to file
 *	xxx		Shorthand for /dev/dsk/xxx
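 *
 * As a rough example (the device name is purely illustrative), the shorthand
 * "c0t0d0s0" is expanded to "/dev/dsk/c0t0d0s0" and yields a leaf nvlist
 * along the lines of type="disk", path="/dev/dsk/c0t0d0s0", is_log=0,
 * whole_disk=0, with a 'devid' string added when the device can be opened.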
make_leaf_vdev(const char *arg, uint64_t is_log)
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *vdev = NULL;
	boolean_t wholedisk = B_FALSE;

	 * Determine what type of vdev this is, and put the full path into
	 * 'path'.  We detect whether this is a device or file afterwards by
	 * checking the st_mode of the file.
		 * Complete device or file path.  Exact type is determined by
		 * examining the file descriptor afterwards.
		wholedisk = is_whole_disk(arg);
		if (!wholedisk && (stat64(arg, &statbuf) != 0)) {
			(void) fprintf(stderr,
			    gettext("cannot open '%s': %s\n"),
			    arg, strerror(errno));
		(void) strlcpy(path, arg, sizeof (path));
		 * This may be a short path for a device, or it could be total
		 * gibberish.  Check to see if it's a known device in
		 * /dev/dsk/.  As part of this check, see if we've been given
		 * an entire disk (minus the slice number).
		(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT,
		    arg);
		wholedisk = is_whole_disk(path);
		if (!wholedisk && (stat64(path, &statbuf) != 0)) {
			 * If we got ENOENT, then the user gave us
			 * gibberish, so try to direct them with a
			 * reasonable error message.  Otherwise,
			 * regurgitate strerror() since it's the best we
			 * can do.
			if (errno == ENOENT) {
				(void) fprintf(stderr,
				    gettext("cannot open '%s': no such "
				    "device in %s\n"), arg, DISK_ROOT);
				(void) fprintf(stderr,
				    gettext("must be a full path or "
				    "shorthand device name\n"));
				(void) fprintf(stderr,
				    gettext("cannot open '%s': %s\n"),
				    path, strerror(errno));

	 * Determine whether this is a device or a file.
	if (wholedisk || S_ISBLK(statbuf.st_mode)) {
		type = VDEV_TYPE_DISK;
	} else if (S_ISREG(statbuf.st_mode)) {
		type = VDEV_TYPE_FILE;
		(void) fprintf(stderr, gettext("cannot use '%s': must be a "
		    "block device or regular file\n"), path);

	 * Finally, we have the complete device or file, and we know that it is
	 * acceptable to use.  Construct the nvlist to describe this vdev.  All
	 * vdevs have a 'path' element, and devices also have a 'devid' element.
	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
	verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
	if (strcmp(type, VDEV_TYPE_DISK) == 0)
		verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
		    (uint64_t)wholedisk) == 0);
	 * For a whole disk, defer getting its devid until after labeling it.
	if (S_ISBLK(statbuf.st_mode) && !wholedisk) {
		 * Get the devid for the device.
		char *minor = NULL, *devid_str = NULL;

		if ((fd = open(path, O_RDONLY)) < 0) {
			(void) fprintf(stderr, gettext("cannot open '%s': "
			    "%s\n"), path, strerror(errno));

		if (devid_get(fd, &devid) == 0) {
			if (devid_get_minor_name(fd, &minor) == 0 &&
			    (devid_str = devid_str_encode(devid, minor)) !=
				verify(nvlist_add_string(vdev,
				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
			if (devid_str != NULL)
				devid_str_free(devid_str);
			devid_str_free(minor);
 * Go through and verify the replication level of the pool is consistent.
 * Performs the following checks:
 *
 *	For the new spec, verifies that devices in mirrors and raidz are the
 *	same size.
 *
 *	If the current configuration already has inconsistent replication
 *	levels, ignore any other potential problems in the new spec.
 *
 *	Otherwise, make sure that the current spec (if there is one) and the new
 *	spec have consistent replication levels.
typedef struct replication_level {
	char *zprl_type;
	uint64_t zprl_children;
	uint64_t zprl_parity;
} replication_level_t;

#define	ZPOOL_FUZZ	(16 * 1024 * 1024)
 * Given a list of toplevel vdevs, return the current replication level.  If
 * the config is inconsistent, then NULL is returned.  If 'fatal' is set, then
 * an error message will be displayed for each self-inconsistent vdev.
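 *
 * Illustrative cases of self-inconsistency: a mirror whose children mix files
 * and disks, a mirror whose children differ in size by more than ZPOOL_FUZZ,
 * or a spec containing both a 2-way and a 3-way mirror as toplevel vdevs.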
static replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
	replication_level_t lastrep, rep, *ret;
	boolean_t dontreport;

	ret = safe_malloc(sizeof (replication_level_t));

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	lastrep.zprl_type = NULL;
	for (t = 0; t < toplevels; t++) {
		uint64_t is_log = B_FALSE;
		 * For separate logs we ignore the top level vdev replication
		 * checks.
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
		if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
		    &child, &children) != 0) {
			 * This is a 'file' or 'disk' vdev.
			rep.zprl_type = type;
			rep.zprl_children = 1;
			 * This is a mirror or RAID-Z vdev.  Go through and make
			 * sure the contents are all the same (files vs. disks),
			 * keeping track of the number of elements in the
			 * process.
			 *
			 * We also check that the size of each vdev (if it can
			 * be determined) is the same.
			rep.zprl_type = type;
			rep.zprl_children = 0;

			if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY,
				    &rep.zprl_parity) == 0);
				assert(rep.zprl_parity != 0);

			 * The 'dontreport' variable indicates that we've
			 * already reported an error for this spec, so don't
			 * bother doing it again.
			for (c = 0; c < children; c++) {
				nvlist_t *cnv = child[c];
				struct stat64 statbuf;
				uint64_t size = -1ULL;

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_TYPE, &childtype) == 0);

				 * If this is a replacing or spare vdev, then
				 * get the real first child of the vdev.
				if (strcmp(childtype,
				    VDEV_TYPE_REPLACING) == 0 ||
				    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
					verify(nvlist_lookup_nvlist_array(cnv,
					    ZPOOL_CONFIG_CHILDREN, &rchild,
					assert(rchildren == 2);
					verify(nvlist_lookup_string(cnv,

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_PATH, &path) == 0);

				 * If we have a raidz/mirror that combines disks
				 * with files, report it as an error.
				if (!dontreport && type != NULL &&
				    strcmp(type, childtype) != 0) {
					    "mismatched replication "
					    "level: %s contains both "
					    "files and devices\n"),

				 * According to stat(2), the value of 'st_size'
				 * is undefined for block devices and character
				 * devices.  But there is no effective way to
				 * determine the real size in userland.
				 *
				 * Instead, we'll take advantage of an
				 * implementation detail of spec_size().  If the
				 * device is currently open, then we (should)
				 * return a valid size.
				 *
				 * If we still don't get a valid size (indicated
				 * by a size of 0 or MAXOFFSET_T), then ignore
				 * this device altogether.
				if ((fd = open(path, O_RDONLY)) >= 0) {
					err = fstat64(fd, &statbuf);
					err = stat64(path, &statbuf);
					    statbuf.st_size == 0 ||
					    statbuf.st_size == MAXOFFSET_T)
				size = statbuf.st_size;

				 * Also make sure that devices and
				 * slices have a consistent size.  If
				 * they differ by a significant amount
				 * (~16MB) then report an error.
				    (vdev_size != -1ULL &&
				    (labs(size - vdev_size) >
					    "%s contains devices of "
					    "different sizes\n"),
		 * At this point, we have the replication of the last toplevel
		 * vdev in 'rep'.  Compare it to 'lastrep' to see if it's
		 * consistent.
		if (lastrep.zprl_type != NULL) {
			if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
				    "mismatched replication level: "
				    "both %s and %s vdevs are "
				    lastrep.zprl_type, rep.zprl_type);
			} else if (lastrep.zprl_parity != rep.zprl_parity) {
				    "mismatched replication level: "
				    "both %llu and %llu device parity "
				    "%s vdevs are present\n"),
			} else if (lastrep.zprl_children != rep.zprl_children) {
				    "mismatched replication level: "
				    "both %llu-way and %llu-way %s "
				    "vdevs are present\n"),
				    lastrep.zprl_children,
 * Check the replication level of the vdev spec against the current pool.  Calls
 * get_replication() to make sure the new spec is self-consistent.  If the pool
 * doesn't have a consistent replication level, then we ignore any errors in the
 * new spec.  Otherwise, report any difference between the two.
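 *
 * For example, adding a plain disk to a pool built from mirrors, or a raidz
 * vdev to a mirrored pool, is reported here ("pool uses %s and new vdev
 * is %s") when the 'force' flag was not specified.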
check_replication(nvlist_t *config, nvlist_t *newroot)
	replication_level_t *current = NULL, *new;

	 * If we have a current pool configuration, check to see if it's
	 * self-consistent.  If not, simply return success.
	if (config != NULL) {
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		if ((current = get_replication(nvroot, B_FALSE)) == NULL)

	 * For spares there may be no children, and therefore no
	 * replication level to check.
	if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) || (children == 0)) {

	 * If all we have is logs then there's no replication level to check.
	if (num_logs(newroot) == children) {

	 * Get the replication level of the new vdev spec, reporting any
	 * inconsistencies found.
	if ((new = get_replication(newroot, B_TRUE)) == NULL) {

	 * Check to see if the new vdev spec matches the replication level of
	 * the current pool.
	if (current != NULL) {
		if (strcmp(current->zprl_type, new->zprl_type) != 0) {
			    "mismatched replication level: pool uses %s "
			    "and new vdev is %s\n"),
			    current->zprl_type, new->zprl_type);
		} else if (current->zprl_parity != new->zprl_parity) {
			    "mismatched replication level: pool uses %llu "
			    "device parity and new vdev uses %llu\n"),
			    current->zprl_parity, new->zprl_parity);
		} else if (current->zprl_children != new->zprl_children) {
			    "mismatched replication level: pool uses %llu-way "
			    "%s and new vdev uses %llu-way %s\n"),
			    current->zprl_children, current->zprl_type,
			    new->zprl_children, new->zprl_type);
 * Go through and find any whole disks in the vdev specification, labeling them
 * as appropriate.  When constructing the vdev spec, we were unable to open this
 * device in order to provide a devid.  Now that we have labeled the disk and
 * know that slice 0 is valid, we can construct the devid.
 *
 * If the disk was already labeled with an EFI label, we will have gotten the
 * devid already (because we were able to open the whole disk).  Otherwise, we
 * need to get the devid after we label the disk.
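 *
 * As an example (device name illustrative only): a whole-disk entry created
 * with path "/dev/dsk/c0t0d0" is labeled here, its path is then rewritten to
 * "/dev/dsk/c0t0d0s0", and the devid is read from that slice.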
make_disks(zpool_handle_t *zhp, nvlist_t *nv)
	char *type, *path, *diskname;
	char buf[MAXPATHLEN];
	char *minor = NULL, *devid_str = NULL;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		if (strcmp(type, VDEV_TYPE_DISK) != 0)

		 * We have a disk device.  Get the path to the device
		 * and see if it's a whole disk by appending the backup
		 * slice and stat()ing the device.
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk)

		diskname = strrchr(path, '/');
		assert(diskname != NULL);
		if (zpool_label_disk(g_zfs, zhp, diskname) == -1)

		 * Fill in the devid, now that we've labeled the disk.
		(void) snprintf(buf, sizeof (buf), "%ss0", path);
		if ((fd = open(buf, O_RDONLY)) < 0) {
			(void) fprintf(stderr,
			    gettext("cannot open '%s': %s\n"),
			    buf, strerror(errno));

		if (devid_get(fd, &devid) == 0) {
			if (devid_get_minor_name(fd, &minor) == 0 &&
			    (devid_str = devid_str_encode(devid, minor)) !=
				verify(nvlist_add_string(nv,
				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
			if (devid_str != NULL)
				devid_str_free(devid_str);
			devid_str_free(minor);
		 * Update the path to refer to the 's0' slice.  The presence of
		 * the 'whole_disk' field indicates to the CLI that we should
		 * chop off the slice number when displaying the device in
		 * general.
		verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, buf) == 0);
	for (c = 0; c < children; c++)
		if ((ret = make_disks(zhp, child[c])) != 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = make_disks(zhp, child[c])) != 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = make_disks(zhp, child[c])) != 0)

 * Determine if the given path is a hot spare within the given configuration.
is_spare(nvlist_t *config, const char *path)
	uint64_t guid, spareguid;

	if ((fd = open(path, O_RDONLY)) < 0)

	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
	    state != POOL_STATE_SPARE ||
	    zpool_read_label(fd, &label) != 0) {

	verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &spareguid) == 0);
			if (spareguid == guid)

 * Go through and find any devices that are in use.  We rely on libdiskmgt for
 * the majority of this task.
check_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
    boolean_t replacing, boolean_t isspare)
	char buf[MAXPATHLEN];

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);

		 * As a generic check, we look to see if this is a replace of a
		 * hot spare within the same pool.  If so, we allow it
		 * regardless of what libdiskmgt or zpool_in_use() says.
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0 && wholedisk)
			(void) snprintf(buf, sizeof (buf), "%ss0",
			(void) strlcpy(buf, path, sizeof (buf));

		if (is_spare(config, buf))

		if (strcmp(type, VDEV_TYPE_DISK) == 0)
			ret = check_device(path, force, isspare);

		if (strcmp(type, VDEV_TYPE_FILE) == 0)
			ret = check_file(path, force, isspare);

	for (c = 0; c < children; c++)
		if ((ret = check_in_use(config, child[c], force,
		    replacing, B_FALSE)) != 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = check_in_use(config, child[c], force,
			    replacing, B_TRUE)) != 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = check_in_use(config, child[c], force,
			    replacing, B_FALSE)) != 0)

is_grouping(const char *type, int *mindev, int *maxdev)
	if (strncmp(type, "raidz", 5) == 0) {
		const char *p = type + 5;

		} else if (*p == '0') {
			return (NULL); /* no zero prefixes allowed */
			nparity = strtol(p, &end, 10);
			if (errno != 0 || nparity < 1 || nparity >= 255 ||

		*mindev = nparity + 1;
		return (VDEV_TYPE_RAIDZ);

	if (strcmp(type, "mirror") == 0) {
		return (VDEV_TYPE_MIRROR);

	if (strcmp(type, "spare") == 0) {
		return (VDEV_TYPE_SPARE);

	if (strcmp(type, "log") == 0) {
		return (VDEV_TYPE_LOG);

	if (strcmp(type, "cache") == 0) {
		return (VDEV_TYPE_L2CACHE);
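
/*
 * A few illustrative results of is_grouping(): "mirror" maps to
 * VDEV_TYPE_MIRROR with a minimum of two devices; "raidz" and "raidz1" map to
 * VDEV_TYPE_RAIDZ with single parity and a minimum of two devices; "raidz2"
 * is double parity with a minimum of three.  Anything unrecognized yields
 * NULL, which construct_spec() below treats as a leaf vdev name.
 */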
 * Construct a syntactically valid vdev specification,
 * and ensure that all devices and files exist and can be opened.
 * Note: we don't bother freeing anything in the error paths
 * because the program is just going to exit anyway.
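 *
 * Roughly, parsing walks the arguments left to right: a grouping word
 * (mirror, raidz*, spare, log, cache) starts a new toplevel or special class
 * and consumes the leaf names that follow it, while a bare name becomes a
 * toplevel leaf vdev on its own.  For example (names illustrative),
 * "mirror c0t0d0 c0t1d0 log c0t2d0 cache c0t3d0" yields one mirrored
 * toplevel, one log device, and one cache device.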
construct_spec(int argc, char **argv)
	nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
	int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
	boolean_t seen_logs;

	seen_logs = B_FALSE;

		 * If it's a mirror or raidz, the subsequent arguments are
		 * its leaves -- until we encounter the next mirror or raidz.
		if ((type = is_grouping(argv[0], &mindev, &maxdev)) != NULL) {
			nvlist_t **child = NULL;
			int c, children = 0;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
				if (spares != NULL) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'spare' can be "
					    "specified only once\n"));

			if (strcmp(type, VDEV_TYPE_LOG) == 0) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'log' can be "
					    "specified only once\n"));

				 * A log is not a real grouping device.
				 * We just set is_log and continue.

			if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
				if (l2cache != NULL) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'cache' can be "
					    "specified only once\n"));

				if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: unsupported 'log' "
					    "device: %s\n"), type);

			for (c = 1; c < argc; c++) {
				if (is_grouping(argv[c], NULL, NULL) != NULL)

				child = realloc(child,
				    children * sizeof (nvlist_t *));
				if ((nv = make_leaf_vdev(argv[c], B_FALSE))
				child[children - 1] = nv;

			if (children < mindev) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: %s requires at least %d "
				    "devices\n"), argv[0], mindev);

			if (children > maxdev) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: %s supports no more than "
				    "%d devices\n"), argv[0], maxdev);

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
			} else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
				nl2cache = children;
				verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
				verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
				verify(nvlist_add_uint64(nv,
				    ZPOOL_CONFIG_IS_LOG, is_log) == 0);
				if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
					verify(nvlist_add_uint64(nv,
					    ZPOOL_CONFIG_NPARITY,
				verify(nvlist_add_nvlist_array(nv,
				    ZPOOL_CONFIG_CHILDREN, child,

				for (c = 0; c < children; c++)
					nvlist_free(child[c]);

			 * We have a device.  Pass off to make_leaf_vdev() to
			 * construct the appropriate nvlist describing the vdev.
			if ((nv = make_leaf_vdev(argv[0], is_log)) == NULL)

		top = realloc(top, toplevels * sizeof (nvlist_t *));
		top[toplevels - 1] = nv;
	if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
	if (seen_logs && nlogs == 0) {
		(void) fprintf(stderr, gettext("invalid vdev specification: "
		    "log requires at least 1 device\n"));

	 * Finally, create nvroot and add all top-level vdevs to it.
	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    top, toplevels) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    spares, nspares) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    l2cache, nl2cache) == 0);

	for (t = 0; t < toplevels; t++)
		nvlist_free(top[t]);
	for (t = 0; t < nspares; t++)
		nvlist_free(spares[t]);
	for (t = 0; t < nl2cache; t++)
		nvlist_free(l2cache[t]);

split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
    splitflags_t flags, int argc, char **argv)
	nvlist_t *newroot = NULL, **child;

	if ((newroot = construct_spec(argc, argv)) == NULL) {
		(void) fprintf(stderr, gettext("Unable to build a "
		    "pool from the specified devices\n"));

	if (!flags.dryrun && make_disks(zhp, newroot) != 0) {
		nvlist_free(newroot);

	/* avoid any tricks in the spec */
	verify(nvlist_lookup_nvlist_array(newroot,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
	for (c = 0; c < children; c++) {
		verify(nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_PATH, &path) == 0);
		if ((type = is_grouping(path, &min, &max)) != NULL) {
			(void) fprintf(stderr, gettext("Cannot use "
			    "'%s' as a device for splitting\n"), type);
			nvlist_free(newroot);

	if (zpool_vdev_split(zhp, newname, &newroot, props, flags) != 0) {
		if (newroot != NULL)
			nvlist_free(newroot);
 * Get and validate the contents of the given vdev specification.  This ensures
 * that the nvlist returned is well-formed, that all the devices exist, and that
 * they are not currently in use by any other known consumer.  The 'poolconfig'
 * parameter is the current configuration of the pool when adding devices to an
 * existing pool, and is used to perform additional checks, such as whether the
 * new devices would change the replication level of the pool.  It can be 'NULL'
 * to indicate that this is a new pool.  The 'force' flag controls whether
 * devices should be forcefully added, even if they appear in use.
make_root_vdev(zpool_handle_t *zhp, int force, int check_rep,
    boolean_t replacing, boolean_t dryrun, int argc, char **argv)
	nvlist_t *poolconfig = NULL;

	 * Construct the vdev specification.  If this is successful, we know
	 * that we have a valid specification, and that all devices can be
	 * opened.
	if ((newroot = construct_spec(argc, argv)) == NULL)

	if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL))

	 * Validate each device to make sure that it's not shared with another
	 * subsystem.  We do this even if 'force' is set, because there are some
	 * uses (such as a dedicated dump device) that even '-f' cannot
	 * override.
	if (check_in_use(poolconfig, newroot, force, replacing, B_FALSE) != 0) {
		nvlist_free(newroot);
	 * Check the replication level of the given vdevs and report any errors
	 * found.  We include the existing pool spec, if any, as we need to
	 * catch changes against the existing replication level.
	if (check_rep && check_replication(poolconfig, newroot) != 0) {
		nvlist_free(newroot);

	 * Run through the vdev specification and label any whole disks found.
	if (!dryrun && make_disks(zhp, newroot) != 0) {
		nvlist_free(newroot);