 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool and the status of exported
 * pools.  These functions return one of the ZPOOL_STATUS_* defines describing
 * the status of the pool.  This status is independent (to a certain degree)
 * of the state of the pool.  A pool's state describes only whether or not it
 * is capable of providing the necessary fault tolerance for data.  The status
 * describes the overall health of the pool's devices.  A pool that is online
 * can still have a device that is experiencing errors.
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID.  The explanation
 * is left up to the caller, depending on whether it is a live pool or an
 * import.
#include "libzfs_impl.h"

 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h.  Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
static char *zfs_msgid_table[] = {

#define NMSGID  (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
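
/*
 * A status value maps to a message ID only when it falls within this table;
 * statuses at or past NMSGID have no ID.  A minimal sketch of that lookup
 * (illustrative only, not part of this file; 'status' is a hypothetical
 * variable holding a ZPOOL_STATUS_* value):
 *
 *      const char *msgid = (status < NMSGID) ? zfs_msgid_table[status] : NULL;
 */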
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
        return (state == VDEV_STATE_CANT_OPEN &&
            aux == VDEV_AUX_OPEN_FAILED);

vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
        return (state == VDEV_STATE_FAULTED);

vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
        return (state == VDEV_STATE_DEGRADED || errs != 0);

vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
        return (state == VDEV_STATE_CANT_OPEN);

vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
        return (state == VDEV_STATE_OFFLINE);

vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
        return (state == VDEV_STATE_REMOVED);
 * Detect whether any leaf devices have seen errors or could not be opened.
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))

         * Ignore problems within a 'replacing' vdev, since we're presumably in
         * the process of repairing any such errors, and don't want to call them
         * out again.  We'll pick up the fact that a resilver is happening
         * later.
        verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
        if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
                return (B_FALSE);

        if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
            &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_vdev_problem(child[c], func))
                                return (B_TRUE);
        } else {
                verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
                    (uint64_t **)&vs, &c) == 0);
                if (func(vs->vs_state, vs->vs_aux,
                    vs->vs_read_errors +
                    vs->vs_write_errors +
                    vs->vs_checksum_errors))
                        return (B_TRUE);
        }
        return (B_FALSE);
}
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 * - Check for a complete and valid configuration
 * - Look for any faulted or missing devices in a non-replicated config
 * - Check for any data errors
 * - Check for any faulted or missing devices in a replicated config
 * - Look for any devices showing errors
 * - Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
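 *
 * For example, a pool that has both corrupted pool metadata and an offlined
 * device is reported as ZPOOL_STATUS_CORRUPT_POOL rather than
 * ZPOOL_STATUS_OFFLINE_DEV, because the metadata check is made first.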
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
            &version) == 0);
        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
            (uint64_t **)&vs, &vsc) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
         * Pool last accessed by another system.
        if (hostid != 0 && (unsigned long)hostid != gethostid() &&
            stateval == POOL_STATE_ACTIVE)
                return (ZPOOL_STATUS_HOSTID_MISMATCH);

         * Newer on-disk version.
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_VERSION_NEWER)
                return (ZPOOL_STATUS_VERSION_NEWER);

         * Check that the config is complete.
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
                return (ZPOOL_STATUS_BAD_GUID_SUM);
         * Check whether the pool has been suspended due to failed I/O.
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
            &suspended) == 0) {
                if (suspended == ZIO_FAILURE_MODE_CONTINUE)
                        return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
                return (ZPOOL_STATUS_IO_FAILURE_WAIT);
        }
         * Could not read a log.
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_BAD_LOG) {
                return (ZPOOL_STATUS_BAD_LOG);
        }
         * Bad devices in non-replicated config.
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            find_vdev_problem(nvroot, vdev_faulted))
                return (ZPOOL_STATUS_FAULTED_DEV_NR);

        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            find_vdev_problem(nvroot, vdev_missing))
                return (ZPOOL_STATUS_MISSING_DEV_NR);

        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            find_vdev_problem(nvroot, vdev_broken))
                return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

         * Corrupted pool metadata
        if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
            vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
                return (ZPOOL_STATUS_CORRUPT_POOL);

         * Persistent data errors.
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
            &nerr) == 0 && nerr != 0)
                return (ZPOOL_STATUS_CORRUPT_DATA);

         * Missing devices in a replicated config.
        if (find_vdev_problem(nvroot, vdev_faulted))
                return (ZPOOL_STATUS_FAULTED_DEV_R);
        if (find_vdev_problem(nvroot, vdev_missing))
                return (ZPOOL_STATUS_MISSING_DEV_R);
        if (find_vdev_problem(nvroot, vdev_broken))
                return (ZPOOL_STATUS_CORRUPT_LABEL_R);

         * Devices with errors
        if (!isimport && find_vdev_problem(nvroot, vdev_errors))
                return (ZPOOL_STATUS_FAILING_DEV);

        if (find_vdev_problem(nvroot, vdev_offlined))
                return (ZPOOL_STATUS_OFFLINE_DEV);

        if (find_vdev_problem(nvroot, vdev_removed))
                return (ZPOOL_STATUS_REMOVED_DEV);

         * Currently resilvering
        if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
                return (ZPOOL_STATUS_RESILVERING);

         * Outdated, but usable, version
        if (version < SPA_VERSION)
                return (ZPOOL_STATUS_VERSION_OLDER);

        return (ZPOOL_STATUS_OK);
}
zpool_get_status(zpool_handle_t *zhp, char **msgid)
        zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
        *msgid = zfs_msgid_table[ret];

zpool_import_status(nvlist_t *config, char **msgid)
        zpool_status_t ret = check_status(config, B_TRUE);
        *msgid = zfs_msgid_table[ret];
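
/*
 * Illustrative usage (a sketch, not part of the library): a consumer such as
 * the zpool command might combine the returned status and message ID along
 * these lines.  The knowledge-article URL form shown is assumed from the FMA
 * message ID convention.
 *
 *      char *msgid;
 *      zpool_status_t status = zpool_get_status(zhp, &msgid);
 *
 *      if (status != ZPOOL_STATUS_OK && msgid != NULL)
 *              (void) printf("   see: http://www.sun.com/msg/%s\n", msgid);
 */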