/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool, as well as the status of
 * exported pools.  Returns one of the ZPOOL_STATUS_* defines describing the
 * status of the pool.  This status is independent (to a certain degree) from
 * the state of the pool.  A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data.  The status
 * describes the overall status of devices.  A pool that is online can still
 * have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID.  The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */
46 #include "libzfs_impl.h"
47 #include "zfeature_common.h"
/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h.  Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 *
 * NOTE(review): the table entries were reconstructed to follow the
 * ZPOOL_STATUS_* ordering in libzfs.h -- verify against that header.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14",	/* corrupt cache */
	"ZFS-8000-2Q",	/* missing device, replicated */
	"ZFS-8000-3C",	/* missing device, non-replicated */
	"ZFS-8000-4J",	/* corrupt label, replicated */
	"ZFS-8000-5E",	/* corrupt label, non-replicated */
	"ZFS-8000-6X",	/* bad GUID sum */
	"ZFS-8000-72",	/* corrupt pool metadata */
	"ZFS-8000-8A",	/* corrupt data */
	"ZFS-8000-9P",	/* failing device */
	"ZFS-8000-A5",	/* on-disk version newer than supported */
	"ZFS-8000-EY",	/* hostid mismatch */
	"ZFS-8000-HC",	/* I/O failure, wait mode */
	"ZFS-8000-JQ",	/* I/O failure, continue mode */
	"ZFS-8000-K4",	/* bad intent log */
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
75 vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
77 return (state == VDEV_STATE_CANT_OPEN &&
78 aux == VDEV_AUX_OPEN_FAILED);
83 vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
85 return (state == VDEV_STATE_FAULTED);
90 vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
92 return (state == VDEV_STATE_DEGRADED || errs != 0);
97 vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
99 return (state == VDEV_STATE_CANT_OPEN);
104 vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
106 return (state == VDEV_STATE_OFFLINE);
111 vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
113 return (state == VDEV_STATE_REMOVED);
117 * Detect if any leaf devices that have seen errors or could not be opened.
120 find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
128 * Ignore problems within a 'replacing' vdev, since we're presumably in
129 * the process of repairing any such errors, and don't want to call them
130 * out again. We'll pick up the fact that a resilver is happening
133 verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
134 if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
137 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
139 for (c = 0; c < children; c++)
140 if (find_vdev_problem(child[c], func))
143 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
144 (uint64_t **)&vs, &c) == 0);
146 if (func(vs->vs_state, vs->vs_aux,
148 vs->vs_write_errors +
149 vs->vs_checksum_errors))
157 * Active pool health status.
159 * To determine the status for a pool, we make several passes over the config,
160 * picking the most egregious error we find. In order of importance, we do the
163 * - Check for a complete and valid configuration
164 * - Look for any faulted or missing devices in a non-replicated config
165 * - Check for any data errors
166 * - Check for any faulted or missing devices in a replicated config
167 * - Look for any devices showing errors
168 * - Check for any resilvering devices
170 * There can obviously be multiple errors within a single pool, so this routine
171 * only picks the most damaging of all the current errors to report.
173 static zpool_status_t
174 check_status(nvlist_t *config, boolean_t isimport)
178 pool_scan_stat_t *ps = NULL;
185 unsigned long system_hostid = gethostid() & 0xffffffff;
187 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
189 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
191 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
192 (uint64_t **)&vs, &vsc) == 0);
193 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
197 * Currently resilvering a vdev
199 (void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
200 (uint64_t **)&ps, &psc);
201 if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
202 ps->pss_state == DSS_SCANNING)
203 return (ZPOOL_STATUS_RESILVERING);
206 * Pool last accessed by another system.
208 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
209 if (hostid != 0 && (unsigned long)hostid != system_hostid &&
210 stateval == POOL_STATE_ACTIVE)
211 return (ZPOOL_STATUS_HOSTID_MISMATCH);
214 * Newer on-disk version.
216 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
217 vs->vs_aux == VDEV_AUX_VERSION_NEWER)
218 return (ZPOOL_STATUS_VERSION_NEWER);
221 * Unsupported feature(s).
223 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
224 vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
227 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
229 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
230 return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
231 return (ZPOOL_STATUS_UNSUP_FEAT_READ);
235 * Check that the config is complete.
237 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
238 vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
239 return (ZPOOL_STATUS_BAD_GUID_SUM);
242 * Check whether the pool has suspended due to failed I/O.
244 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
246 if (suspended == ZIO_FAILURE_MODE_CONTINUE)
247 return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
248 return (ZPOOL_STATUS_IO_FAILURE_WAIT);
252 * Could not read a log.
254 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
255 vs->vs_aux == VDEV_AUX_BAD_LOG) {
256 return (ZPOOL_STATUS_BAD_LOG);
260 * Bad devices in non-replicated config.
262 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
263 find_vdev_problem(nvroot, vdev_faulted))
264 return (ZPOOL_STATUS_FAULTED_DEV_NR);
266 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
267 find_vdev_problem(nvroot, vdev_missing))
268 return (ZPOOL_STATUS_MISSING_DEV_NR);
270 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
271 find_vdev_problem(nvroot, vdev_broken))
272 return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
275 * Corrupted pool metadata
277 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
278 vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
279 return (ZPOOL_STATUS_CORRUPT_POOL);
282 * Persistent data errors.
285 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
286 &nerr) == 0 && nerr != 0)
287 return (ZPOOL_STATUS_CORRUPT_DATA);
291 * Missing devices in a replicated config.
293 if (find_vdev_problem(nvroot, vdev_faulted))
294 return (ZPOOL_STATUS_FAULTED_DEV_R);
295 if (find_vdev_problem(nvroot, vdev_missing))
296 return (ZPOOL_STATUS_MISSING_DEV_R);
297 if (find_vdev_problem(nvroot, vdev_broken))
298 return (ZPOOL_STATUS_CORRUPT_LABEL_R);
301 * Devices with errors
303 if (!isimport && find_vdev_problem(nvroot, vdev_errors))
304 return (ZPOOL_STATUS_FAILING_DEV);
309 if (find_vdev_problem(nvroot, vdev_offlined))
310 return (ZPOOL_STATUS_OFFLINE_DEV);
315 if (find_vdev_problem(nvroot, vdev_removed))
316 return (ZPOOL_STATUS_REMOVED_DEV);
319 * Outdated, but usable, version
321 if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
322 return (ZPOOL_STATUS_VERSION_OLDER);
325 * Usable pool with disabled features
327 if (version >= SPA_VERSION_FEATURES) {
332 feat = fnvlist_lookup_nvlist(config,
333 ZPOOL_CONFIG_LOAD_INFO);
334 feat = fnvlist_lookup_nvlist(feat,
335 ZPOOL_CONFIG_ENABLED_FEAT);
337 feat = fnvlist_lookup_nvlist(config,
338 ZPOOL_CONFIG_FEATURE_STATS);
341 for (i = 0; i < SPA_FEATURES; i++) {
342 zfeature_info_t *fi = &spa_feature_table[i];
343 if (!nvlist_exists(feat, fi->fi_guid))
344 return (ZPOOL_STATUS_FEAT_DISABLED);
348 return (ZPOOL_STATUS_OK);
352 zpool_get_status(zpool_handle_t *zhp, char **msgid)
354 zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
359 *msgid = zfs_msgid_table[ret];
365 zpool_import_status(nvlist_t *config, char **msgid)
367 zpool_status_t ret = check_status(config, B_TRUE);
372 *msgid = zfs_msgid_table[ret];
378 dump_ddt_stat(const ddt_stat_t *dds, int h)
381 char blocks[6], lsize[6], psize[6], dsize[6];
382 char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];
384 if (dds == NULL || dds->dds_blocks == 0)
388 (void) strcpy(refcnt, "Total");
390 zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));
392 zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
393 zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
394 zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
395 zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
396 zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
397 zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
398 zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
399 zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));
401 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
403 blocks, lsize, psize, dsize,
404 ref_blocks, ref_lsize, ref_psize, ref_dsize);
408 * Print the DDT histogram and the column totals.
411 zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
417 (void) printf("bucket "
420 (void) printf("______ "
421 "______________________________ "
422 "______________________________\n");
424 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
426 "blocks", "LSIZE", "PSIZE", "DSIZE",
427 "blocks", "LSIZE", "PSIZE", "DSIZE");
429 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
431 "------", "-----", "-----", "-----",
432 "------", "-----", "-----", "-----");
434 for (h = 0; h < 64; h++)
435 dump_ddt_stat(&ddh->ddh_stat[h], h);
437 dump_ddt_stat(dds_total, -1);