/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>

#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *	checksum		X	X	X
 *	device			X	X
 *	io			X	X	X
 *	data			X		X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t.  The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *		|
 *		V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|    Read I/O	|	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *		|
 *		V
 *	+---------------+	Reads associated with a particular device, but
 *	|  Physical I/O |	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the
 * rest of ZIO.  Typically, 'physical I/O' simply means that there is no
 * attached block pointer.  But I/O with no associated block pointer can still
 * be related to a logical piece of data (e.g. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child
 * I/Os then inherit this pointer, so that when it is first set, subsequent
 * failures will use the same ENA.  For vdev cache fill and queue aggregation
 * I/O, this pointer is set to NULL, and no ereport will be generated (since it
 * doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
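 *
 * As an illustrative sketch only (not code from this file), the inheritance
 * step performed when a child zio is created amounts to something like:
 *
 *	if (pio != NULL)
 *		zio->io_logical = pio->io_logical;	// child inherits
 *	else if (this is a logical I/O)
 *		zio->io_logical = zio;			// points to itself
 *
 * so every ereport generated anywhere in the tree of child I/Os can pull
 * its ENA from the same logical zio.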
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical I/O.  When the logical I/O completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
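
/*
 * Sketch of the checksum-report life cycle implemented below (derived from
 * the functions in this file):
 *
 *	zfs_ereport_start_checksum()	builds the ereport and links the
 *					zio_cksum_report_t onto
 *					io_logical->io_cksum_report
 *	zfs_ereport_finish_checksum()	annotates the ereport with the
 *					good/bad buffer diff and posts it
 *	zfs_ereport_free_checksum()	tears down a report that was never
 *					finished
 */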
static void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
        if (nvl)
                fm_nvlist_destroy(nvl, FM_NVA_FREE);

        if (detector)
                fm_nvlist_destroy(detector, FM_NVA_FREE);
}
static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
        nvlist_t *ereport, *detector;
        uint64_t ena;
        char class[64];
        /*
         * If we are doing a spa_tryimport() or in recovery mode,
         * ignore errors.
         */
        if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
            spa_load_state(spa) == SPA_LOAD_RECOVER)
                return;

        /*
         * If we are in the middle of opening a pool, and the previous attempt
         * failed, don't bother logging any new ereports - we're just going to
         * get the same diagnosis anyway.
         */
        if (spa_load_state(spa) != SPA_LOAD_NONE &&
            spa->spa_last_open_failed)
                return;
        if (zio != NULL) {
                /*
                 * If this is not a read or write zio, ignore the error.  This
                 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
                 */
                if (zio->io_type != ZIO_TYPE_READ &&
                    zio->io_type != ZIO_TYPE_WRITE)
                        return;

                if (vd != NULL) {
                        /*
                         * If the vdev has already been marked as failing due
                         * to a failed probe, then ignore any subsequent I/O
                         * errors, as the DE will automatically fault the vdev
                         * on the first such failure.  This also catches cases
                         * where vdev_remove_wanted is set and the device has
                         * not yet been asynchronously placed into the REMOVED
                         * state.
                         */
                        if (zio->io_vd == vd && !vdev_accessible(vd, zio))
                                return;

                        /*
                         * Ignore checksum errors for reads from DTL regions of
                         * leaf vdevs.
                         */
                        if (zio->io_type == ZIO_TYPE_READ &&
                            zio->io_error == ECKSUM &&
                            vd->vdev_ops->vdev_op_leaf &&
                            vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
                                return;
                }
        }
        /*
         * For probe failure, we want to avoid posting ereports if we've
         * already removed the device in the meantime.
         */
        if (vd != NULL &&
            strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
            (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
                return;
        if ((ereport = fm_nvlist_create(NULL)) == NULL)
                return;

        if ((detector = fm_nvlist_create(NULL)) == NULL) {
                fm_nvlist_destroy(ereport, FM_NVA_FREE);
                return;
        }
        /*
         * Serialize ereport generation
         */
        mutex_enter(&spa->spa_errlist_lock);
        /*
         * Determine the ENA to use for this event.  If we are in a loading
         * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state,
         * use a root zio-wide ENA.  Otherwise, simply use a unique ENA.
         */
        if (spa_load_state(spa) != SPA_LOAD_NONE) {
                if (spa->spa_ena == 0)
                        spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
                ena = spa->spa_ena;
        } else if (zio != NULL && zio->io_logical != NULL) {
                if (zio->io_logical->io_ena == 0)
                        zio->io_logical->io_ena =
                            fm_ena_generate(0, FM_ENA_FMT1);
                ena = zio->io_logical->io_ena;
        } else {
                ena = fm_ena_generate(0, FM_ENA_FMT1);
        }
        /*
         * Construct the full class, detector, and other standard FMA fields.
         */
        (void) snprintf(class, sizeof (class), "%s.%s",
            ZFS_ERROR_CLASS, subclass);
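        /*
         * The class string constructed above is, e.g., "fs.zfs.checksum"
         * for the "checksum" subclass; once posted as an FMA ereport it
         * appears as "ereport.fs.zfs.checksum".
         */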
        fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
            vd != NULL ? vd->vdev_guid : 0);

        fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);
        /*
         * Construct the per-ereport payload, depending on which parameters are
         * passed in.
         */

        /*
         * Generic payload members common to all ereports.
         */
        fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL,
            DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
            DATA_TYPE_UINT64, spa_guid(spa),
            FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
            spa_load_state(spa), NULL);
        if (spa != NULL) {
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
                    DATA_TYPE_STRING,
                    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
                    FM_EREPORT_FAILMODE_WAIT :
                    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
                    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
                    NULL);
        }
        if (vd != NULL) {
                vdev_t *pvd = vd->vdev_parent;

                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
                    DATA_TYPE_UINT64, vd->vdev_guid,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
                    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
                if (vd->vdev_path != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
                            DATA_TYPE_STRING, vd->vdev_path, NULL);
                if (vd->vdev_devid != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
                            DATA_TYPE_STRING, vd->vdev_devid, NULL);
                if (vd->vdev_fru != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
                            DATA_TYPE_STRING, vd->vdev_fru, NULL);

                if (pvd != NULL) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
                            DATA_TYPE_UINT64, pvd->vdev_guid,
                            FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
                            DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
                            NULL);
                        if (pvd->vdev_path != NULL)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
                                    DATA_TYPE_STRING, pvd->vdev_path, NULL);
                        if (pvd->vdev_devid != NULL)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
                                    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
                }
        }
        if (zio != NULL) {
                /*
                 * Payload common to all I/Os.
                 */
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
                    DATA_TYPE_INT32, zio->io_error, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
                    DATA_TYPE_INT32, zio->io_flags, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
                    DATA_TYPE_UINT64, zio->io_delay, NULL);

                /*
                 * If the 'size' parameter is non-zero, it indicates this is a
                 * RAID-Z or other I/O where the physical offset and length are
                 * provided for us, instead of within the zio_t.
                 */
                if (vd != NULL) {
                        if (size)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
                                    DATA_TYPE_UINT64, stateoroffset,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
                                    DATA_TYPE_UINT64, size, NULL);
                        else
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
                                    DATA_TYPE_UINT64, zio->io_offset,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
                                    DATA_TYPE_UINT64, zio->io_size, NULL);
                }

                /*
                 * Payload for I/Os with corresponding logical information.
                 */
                if (zio->io_logical != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
                            DATA_TYPE_UINT64,
                            zio->io_logical->io_bookmark.zb_objset,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
                            DATA_TYPE_UINT64,
                            zio->io_logical->io_bookmark.zb_object,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
                            DATA_TYPE_INT64,
                            zio->io_logical->io_bookmark.zb_level,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
                            DATA_TYPE_UINT64,
                            zio->io_logical->io_bookmark.zb_blkid, NULL);
        } else if (vd != NULL) {
                /*
                 * If we have a vdev but no zio, this is a device fault, and
                 * the 'stateoroffset' parameter indicates the previous state
                 * of the vdev.
                 */
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
                    DATA_TYPE_UINT64, stateoroffset, NULL);
        }
        mutex_exit(&spa->spa_errlist_lock);

        *ereport_out = ereport;
        *detector_out = detector;
}
/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))
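/* i.e. up to 128 / 8 = 16 differing 64-bit words are recorded inline */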
#define	MAX_RANGES		16
typedef struct zfs_ecksum_info {
        /* histograms of set and cleared bits by bit number in a 64-bit word */
        uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
        uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

        /* inline arrays of bits set and cleared. */
        uint64_t zei_bits_set[ZFM_MAX_INLINE];
        uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

        /*
         * for each range, the number of bits set and cleared.  The Hamming
         * distance between the good and bad buffers is the sum of them all.
         */
        uint32_t zei_range_sets[MAX_RANGES];
        uint32_t zei_range_clears[MAX_RANGES];

        struct zei_ranges {
                uint32_t zr_start;
                uint32_t zr_end;
        } zei_ranges[MAX_RANGES];

        size_t zei_range_count;
        uint32_t zei_mingap;
        uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;
static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
        int i;
        int bits = 0;
        uint64_t value = BE_64(value_arg);

        /* We store the bits in big-endian (largest-first) order */
        for (i = 0; i < 64; i++) {
                if (value & (1ull << i)) {
                        hist[63 - i]++;
                        ++bits;
                }
        }
        /* update the count of bits changed */
        *count += bits;
}
/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one.  We also update zei_mingap
 * to the new smallest gap, to prepare for our next invocation.
 */
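/*
 * Worked example of the joining rule (illustrative values only): if
 * zei_mingap is 2, then new_allowed_gap is 3, and two neighboring word
 * ranges such as [0,8) and [10,18) (gap 10 - 8 = 2 < 3) are joined into
 * the single range [0,18).
 */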
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
        uint32_t mingap = UINT32_MAX;
        uint32_t new_allowed_gap = eip->zei_mingap + 1;

        size_t idx, output;
        size_t max = eip->zei_range_count;

        struct zei_ranges *r = eip->zei_ranges;

        ASSERT3U(eip->zei_range_count, >, 0);
        ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

        output = idx = 0;
        while (idx < max - 1) {
                uint32_t start = r[idx].zr_start;
                uint32_t end = r[idx].zr_end;

                while (idx < max - 1) {
                        uint32_t nstart, nend, gap;

                        idx++;
                        nstart = r[idx].zr_start;
                        nend = r[idx].zr_end;

                        gap = nstart - end;
                        if (gap < new_allowed_gap) {
                                end = nend;
                                continue;
                        }
                        if (gap < mingap)
                                mingap = gap;
                        break;
                }
                r[output].zr_start = start;
                r[output].zr_end = end;
                output++;
        }
        ASSERT3U(output, <, eip->zei_range_count);
        eip->zei_range_count = output;
        eip->zei_mingap = mingap;
        eip->zei_allowed_mingap = new_allowed_gap;
}
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
        struct zei_ranges *r = eip->zei_ranges;
        size_t count = eip->zei_range_count;

        if (count >= MAX_RANGES) {
                zei_shrink_ranges(eip);
                count = eip->zei_range_count;
        }
        if (count == 0) {
                eip->zei_mingap = UINT32_MAX;
                eip->zei_allowed_mingap = 1;
        } else {
                int gap = start - r[count - 1].zr_end;

                if (gap < eip->zei_allowed_mingap) {
                        r[count - 1].zr_end = end;
                        return;
                }
                if (gap < eip->zei_mingap)
                        eip->zei_mingap = gap;
        }
        r[count].zr_start = start;
        r[count].zr_end = end;
        eip->zei_range_count++;
}
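
/*
 * For example: after zei_add_range(eip, 3, 5), a subsequent call
 * zei_add_range(eip, 5, 9) computes gap 0, which is below the initial
 * zei_allowed_mingap of 1, so the existing range is simply extended to
 * [3,9) rather than a new entry being appended.
 */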
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
        struct zei_ranges *r = eip->zei_ranges;
        size_t count = eip->zei_range_count;
        size_t result = 0;
        size_t idx;

        for (idx = 0; idx < count; idx++)
                result += (r[idx].zr_end - r[idx].zr_start);

        return (result);
}
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
    boolean_t drop_if_identical)
{
        const uint64_t *good = (const uint64_t *)goodbuf;
        const uint64_t *bad = (const uint64_t *)badbuf;

        uint64_t allset = 0;
        uint64_t allcleared = 0;

        size_t nui64s = size / sizeof (uint64_t);

        size_t inline_size;
        int no_inline = 0;
        size_t idx;
        size_t range;

        size_t offset = 0;
        ssize_t start = -1;

        zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);

        /* don't do any annotation for injected checksum errors */
        if (info != NULL && info->zbc_injected)
                return (eip);
        if (info != NULL && info->zbc_has_cksum) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
                    DATA_TYPE_UINT64_ARRAY,
                    sizeof (info->zbc_expected) / sizeof (uint64_t),
                    (uint64_t *)&info->zbc_expected,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
                    DATA_TYPE_UINT64_ARRAY,
                    sizeof (info->zbc_actual) / sizeof (uint64_t),
                    (uint64_t *)&info->zbc_actual,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
                    DATA_TYPE_STRING,
                    info->zbc_checksum_name,
                    NULL);

                if (info->zbc_byteswapped) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
                            DATA_TYPE_BOOLEAN, 1,
                            NULL);
                }
        }
        if (badbuf == NULL || goodbuf == NULL)
                return (eip);

        ASSERT3U(nui64s, <=, UINT16_MAX);
        ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
        ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
        ASSERT3U(size, <=, UINT32_MAX);
        /* build up the range list by comparing the two buffers. */
        for (idx = 0; idx < nui64s; idx++) {
                if (good[idx] == bad[idx]) {
                        if (start == -1)
                                continue;

                        zei_add_range(eip, start, idx);
                        start = -1;
                } else {
                        if (start != -1)
                                continue;

                        start = idx;
                }
        }
        if (start != -1)
                zei_add_range(eip, start, idx);
        /* See if it will fit in our inline buffers */
        inline_size = zei_range_total_size(eip);
        if (inline_size > ZFM_MAX_INLINE)
                no_inline = 1;
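        /*
         * When more than ZFM_MAX_INLINE (16) 64-bit words differ, the raw
         * bit patterns are omitted from the ereport and only the per-bit
         * histograms filled in below are reported.
         */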
        /*
         * If there is no change and we want to drop if the buffers are
         * identical, do so.
         */
        if (inline_size == 0 && drop_if_identical) {
                kmem_free(eip, sizeof (*eip));
                return (NULL);
        }
        /*
         * Now walk through the ranges, filling in the details of the
         * differences.  Also convert our uint64_t-array offsets to byte
         * offsets.
         */
        for (range = 0; range < eip->zei_range_count; range++) {
                size_t start = eip->zei_ranges[range].zr_start;
                size_t end = eip->zei_ranges[range].zr_end;

                for (idx = start; idx < end; idx++) {
                        uint64_t set, cleared;

                        // bits set in bad, but not in good
                        set = ((~good[idx]) & bad[idx]);
                        // bits set in good, but not in bad
                        cleared = (good[idx] & (~bad[idx]));

                        allset |= set;
                        allcleared |= cleared;

                        if (!no_inline) {
                                ASSERT3U(offset, <, inline_size);
                                eip->zei_bits_set[offset] = set;
                                eip->zei_bits_cleared[offset] = cleared;
                                offset++;
                        }

                        update_histogram(set, eip->zei_histogram_set,
                            &eip->zei_range_sets[range]);
                        update_histogram(cleared, eip->zei_histogram_cleared,
                            &eip->zei_range_clears[range]);
                }
                /* convert to byte offsets */
                eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
                eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
        }
        eip->zei_allowed_mingap *= sizeof (uint64_t);
        inline_size *= sizeof (uint64_t);
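        /*
         * e.g. a word range [3,5) is reported to consumers as the byte
         * range [24,40) within the block.
         */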
        /* fill in ereport */
        fm_payload_set(ereport,
            FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
            DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
            (uint32_t *)eip->zei_ranges,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
            DATA_TYPE_UINT32, eip->zei_allowed_mingap,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
            DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
            DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
            NULL);

        if (!no_inline) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
                    DATA_TYPE_UINT8_ARRAY,
                    inline_size, (uint8_t *)eip->zei_bits_set,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
                    DATA_TYPE_UINT8_ARRAY,
                    inline_size, (uint8_t *)eip->zei_bits_cleared,
                    NULL);
        } else {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
                    DATA_TYPE_UINT16_ARRAY,
                    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
                    DATA_TYPE_UINT16_ARRAY,
                    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
                    NULL);
        }
        return (eip);
}
void
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
        nvlist_t *ereport = NULL;
        nvlist_t *detector = NULL;

        zfs_ereport_start(&ereport, &detector,
            subclass, spa, vd, zio, stateoroffset, size);

        if (ereport == NULL)
                return;

        /* Cleanup is handled by the callback function */
        zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
}
void
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
    zio_bad_cksum_t *info)
{
        zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_SLEEP);

        if (zio->io_vsd != NULL)
                zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
        else
                zio_vsd_default_cksum_report(zio, report, arg);

        /* copy the checksum failure information if it was provided */
        if (info != NULL) {
                report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
                bcopy(info, report->zcr_ckinfo, sizeof (*info));
        }

        report->zcr_align = 1ULL << vd->vdev_top->vdev_ashift;
        report->zcr_length = length;

        zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
            FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

        if (report->zcr_ereport == NULL) {
                report->zcr_free(report->zcr_cbdata, report->zcr_cbinfo);
                if (report->zcr_ckinfo != NULL) {
                        kmem_free(report->zcr_ckinfo,
                            sizeof (*report->zcr_ckinfo));
                }
                kmem_free(report, sizeof (*report));
                return;
        }

        mutex_enter(&spa->spa_errlist_lock);
        report->zcr_next = zio->io_logical->io_cksum_report;
        zio->io_logical->io_cksum_report = report;
        mutex_exit(&spa->spa_errlist_lock);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report,
    const void *good_data, const void *bad_data, boolean_t drop_if_identical)
{
        zfs_ecksum_info_t *info = NULL;

        info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
            good_data, bad_data, report->zcr_length, drop_if_identical);

        if (info != NULL)
                zfs_zevent_post(report->zcr_ereport,
                    report->zcr_detector, zfs_zevent_post_cb);

        report->zcr_ereport = report->zcr_detector = NULL;
        if (info != NULL)
                kmem_free(info, sizeof (*info));
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
        if (rpt->zcr_ereport != NULL) {
                fm_nvlist_destroy(rpt->zcr_ereport,
                    FM_NVA_FREE);
                fm_nvlist_destroy(rpt->zcr_detector,
                    FM_NVA_FREE);
        }

        rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

        if (rpt->zcr_ckinfo != NULL)
                kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

        kmem_free(rpt, sizeof (*rpt));
}
void
zfs_ereport_send_interim_checksum(zio_cksum_report_t *report)
{
        zfs_zevent_post(report->zcr_ereport, report->zcr_detector, NULL);
}
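
/*
 * Immediate (non-deferred) variant of the checksum ereport path: build,
 * annotate, and post in one call, without hanging a zio_cksum_report_t
 * off the logical I/O.
 */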
void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length,
    const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
{
        nvlist_t *ereport = NULL;
        nvlist_t *detector = NULL;
        zfs_ecksum_info_t *info;

        zfs_ereport_start(&ereport, &detector,
            FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

        if (ereport == NULL)
                return;

        info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
            B_FALSE);

        if (info != NULL) {
                zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
                kmem_free(info, sizeof (*info));
        }
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *name)
{
        nvlist_t *resource;
        char class[64];

        if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
                return;

        if ((resource = fm_nvlist_create(NULL)) == NULL)
                return;

        (void) snprintf(class, sizeof (class), "%s.%s.%s", FM_RSRC_RESOURCE,
            ZFS_ERROR_CLASS, name);
        VERIFY(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION) == 0);
        VERIFY(nvlist_add_string(resource, FM_CLASS, class) == 0);
        VERIFY(nvlist_add_uint64(resource,
            FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)) == 0);
        if (vd) {
                VERIFY(nvlist_add_uint64(resource,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid) == 0);
                VERIFY(nvlist_add_uint64(resource,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state) == 0);
        }

        zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
}
/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given
 * vdev has been removed from the system.  This will cause the DE to ignore
 * any recent I/O errors, inferring that they are due to the asynchronous
 * device removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_REMOVED);
}
/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_AUTOREPLACE);
}
/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_STATECHANGE);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
#endif /* _KERNEL && HAVE_SPL */