4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "@(#)libzfs_status.c 1.7 07/06/29 SMI"
29 * This file contains the functions which analyze the status of a pool. This
30 * includes both the status of an active pool, as well as the status of exported
31 * pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
32 * the pool. This status is independent (to a certain degree) from the state of
33 * the pool. A pool's state describes only whether or not it is capable of
34 * providing the necessary fault tolerance for data. The status describes the
35 * overall status of devices. A pool that is online can still have a device
36 * that is experiencing errors.
38 * Only a subset of the possible faults can be detected using 'zpool status',
39 * and not all possible errors correspond to a FMA message ID. The explanation
40 * is left up to the caller, depending on whether it is a live pool or an
47 #include "libzfs_impl.h"
50 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
51 * in libzfs.h. Note that there are some status results which go past the end
52 * of this table, and hence have no associated message ID.
54 static char *zfs_msgid_table[] = {
68 #define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
72 vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
74 return (state == VDEV_STATE_CANT_OPEN &&
75 aux == VDEV_AUX_OPEN_FAILED);
80 vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
82 return (state == VDEV_STATE_FAULTED);
87 vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
89 return (state == VDEV_STATE_DEGRADED || errs != 0);
94 vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
96 return (state == VDEV_STATE_CANT_OPEN);
101 vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
103 return (state == VDEV_STATE_OFFLINE);
107 * Detect if any leaf devices that have seen errors or could not be opened.
110 find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
118 * Ignore problems within a 'replacing' vdev, since we're presumably in
119 * the process of repairing any such errors, and don't want to call them
120 * out again. We'll pick up the fact that a resilver is happening
123 verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
124 if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
127 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
129 for (c = 0; c < children; c++)
130 if (find_vdev_problem(child[c], func))
133 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
134 (uint64_t **)&vs, &c) == 0);
136 if (func(vs->vs_state, vs->vs_aux,
138 vs->vs_write_errors +
139 vs->vs_checksum_errors))
147 * Active pool health status.
149 * To determine the status for a pool, we make several passes over the config,
150 * picking the most egregious error we find. In order of importance, we do the
153 * - Check for a complete and valid configuration
154 * - Look for any faulted or missing devices in a non-replicated config
155 * - Check for any data errors
156 * - Check for any faulted or missing devices in a replicated config
157 * - Look for any devices showing errors
158 * - Check for any resilvering devices
160 * There can obviously be multiple errors within a single pool, so this routine
161 * only picks the most damaging of all the current errors to report.
163 static zpool_status_t
164 check_status(nvlist_t *config, boolean_t isimport)
174 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
176 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
178 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
179 (uint64_t **)&vs, &vsc) == 0);
180 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
182 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
185 * Pool last accessed by another system.
187 if (hostid != 0 && (unsigned long)hostid != gethostid() &&
188 stateval == POOL_STATE_ACTIVE)
189 return (ZPOOL_STATUS_HOSTID_MISMATCH);
192 * Newer on-disk version.
194 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
195 vs->vs_aux == VDEV_AUX_VERSION_NEWER)
196 return (ZPOOL_STATUS_VERSION_NEWER);
199 * Check that the config is complete.
201 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
202 vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
203 return (ZPOOL_STATUS_BAD_GUID_SUM);
206 * Bad devices in non-replicated config.
208 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
209 find_vdev_problem(nvroot, vdev_faulted))
210 return (ZPOOL_STATUS_FAULTED_DEV_NR);
212 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
213 find_vdev_problem(nvroot, vdev_missing))
214 return (ZPOOL_STATUS_MISSING_DEV_NR);
216 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
217 find_vdev_problem(nvroot, vdev_broken))
218 return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
221 * Corrupted pool metadata
223 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
224 vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
225 return (ZPOOL_STATUS_CORRUPT_POOL);
228 * Persistent data errors.
231 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
232 &nerr) == 0 && nerr != 0)
233 return (ZPOOL_STATUS_CORRUPT_DATA);
237 * Missing devices in a replicated config.
239 if (find_vdev_problem(nvroot, vdev_faulted))
240 return (ZPOOL_STATUS_FAULTED_DEV_R);
241 if (find_vdev_problem(nvroot, vdev_missing))
242 return (ZPOOL_STATUS_MISSING_DEV_R);
243 if (find_vdev_problem(nvroot, vdev_broken))
244 return (ZPOOL_STATUS_CORRUPT_LABEL_R);
247 * Devices with errors
249 if (!isimport && find_vdev_problem(nvroot, vdev_errors))
250 return (ZPOOL_STATUS_FAILING_DEV);
255 if (find_vdev_problem(nvroot, vdev_offlined))
256 return (ZPOOL_STATUS_OFFLINE_DEV);
259 * Currently resilvering
261 if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
262 return (ZPOOL_STATUS_RESILVERING);
265 * Outdated, but usable, version
267 if (version < SPA_VERSION)
268 return (ZPOOL_STATUS_VERSION_OLDER);
270 return (ZPOOL_STATUS_OK);
274 zpool_get_status(zpool_handle_t *zhp, char **msgid)
276 zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
281 *msgid = zfs_msgid_table[ret];
287 zpool_import_status(nvlist_t *config, char **msgid)
289 zpool_status_t ret = check_status(config, B_TRUE);
294 *msgid = zfs_msgid_table[ret];