* ==========================================================================
*/
char *zio_type_name[ZIO_TYPES] = {
- "zio_null", "zio_read", "zio_write", "zio_free", "zio_claim",
- "zio_ioctl"
+ "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl"
};
/*
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
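+/*
+ * zio_bulk_flags: additional kmem cache flags OR'd in when the zio buffer
+ * caches are created in zio_init().  zio_delay_max: delay, in milliseconds,
+ * beyond which a slow zio is reported via an FM_EREPORT_ZFS_DELAY event.
+ */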
+int zio_bulk_flags = 0;
+int zio_delay_max = ZIO_DELAY_MAX;
#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
int zio_buf_debug_limit = 0;
#endif
+static inline void __zio_execute(zio_t *zio);
+
void
zio_init(void)
{
(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL,
- size > zio_buf_debug_limit ? KMC_NODEBUG : 0);
+ (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) |
+ zio_bulk_flags);
(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, data_alloc_arena,
- size > zio_buf_debug_limit ? KMC_NODEBUG : 0);
+ (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) |
+ zio_bulk_flags);
}
}
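+ /*
+ * The last child to complete clears the parent's stall and re-dispatches
+ * the parent through the inlined __zio_execute() to limit stack growth.
+ */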
if (--*countp == 0 && pio->io_stall == countp) {
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
- zio_execute(pio);
+ __zio_execute(pio);
} else {
mutex_exit(&pio->io_lock);
}
*/
static zio_pipe_stage_t *zio_pipeline[];
+/*
+ * zio_execute() is a wrapper around the static function
+ * __zio_execute() so that we can force __zio_execute() to be
+ * inlined. This reduces stack overhead, which is important
+ * because __zio_execute() is called recursively in several zio
+ * code paths. zio_execute() itself cannot be inlined because
+ * it is externally visible.
+ */
void
zio_execute(zio_t *zio)
{
+ __zio_execute(zio);
+}
+
+__attribute__((always_inline))
+static inline void
+__zio_execute(zio_t *zio)
+{
zio->io_executor = curthread;
while (zio->io_stage < ZIO_STAGE_DONE) {
}
}
+
/*
* ==========================================================================
* Initiate I/O, either sync or async
zio->io_waiter = curthread;
- zio_execute(zio);
+ __zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL)
zio_add_child(spa->spa_async_zio_root, zio);
}
- zio_execute(zio);
+ __zio_execute(zio);
}
/*
* responsibility of the caller to wait on him.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
- zio_execute(pio);
+ __zio_execute(pio);
}
void
vdev_stat_update(zio, zio->io_size);
+ /*
+ * If this I/O is attached to a particular vdev and is slow, exceeding
+ * zio_delay_max (30 seconds by default) to complete, post an error
+ * describing the I/O delay. We ignore these errors if the device is
+ * currently unavailable.
+ */
+ if (zio->io_delay >= zio_delay_max) {
+ if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd))
+ zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa,
+ zio->io_vd, zio, 0, 0);
+ }
+
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
zio_checksum_verify,
zio_done
};
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+/* Fault injection */
+EXPORT_SYMBOL(zio_injection_enabled);
+EXPORT_SYMBOL(zio_inject_fault);
+EXPORT_SYMBOL(zio_inject_list_next);
+EXPORT_SYMBOL(zio_clear_fault);
+EXPORT_SYMBOL(zio_handle_fault_injection);
+EXPORT_SYMBOL(zio_handle_device_injection);
+EXPORT_SYMBOL(zio_handle_label_injection);
+EXPORT_SYMBOL(zio_priority_table);
+EXPORT_SYMBOL(zio_type_name);
+
+module_param(zio_bulk_flags, int, 0644);
+MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers");
+
+module_param(zio_delay_max, int, 0644);
+MODULE_PARM_DESC(zio_delay_max, "Max zio delay before posting an event (ms)");
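+
+/*
+ * Usage sketch (assumes the module is loaded as "zfs"): both parameters
+ * are writable (0644) and can be inspected or tuned at runtime through
+ * /sys/module/zfs/parameters/, for example:
+ *   echo 60000 > /sys/module/zfs/parameters/zio_delay_max
+ */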
+#endif