zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8);
zio_inject_init();
+
+ lz4_init();
}
void
zio_fini(void)
{
kmem_cache_destroy(zio_cache);
zio_inject_fini();
+
+ lz4_fini();
}
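For context, a minimal sketch of the lz4_init()/lz4_fini() pair wired in above, assuming the common pattern of a kmem cache for the compressor's scratch hash tables; the cache name, the refTables type, and the NULL reset are illustrative, not necessarily the upstream lz4.c code:

static kmem_cache_t *lz4_cache;	/* assumed scratch-state cache */

void
lz4_init(void)
{
	/* one cache object holds the LZ4 compression hash table */
	lz4_cache = kmem_cache_create("lz4_cache",
	    sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
lz4_fini(void)
{
	if (lz4_cache != NULL) {
		kmem_cache_destroy(lz4_cache);
		lz4_cache = NULL;
	}
}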
zio->io_vsd_ops = NULL;
zio->io_offset = offset;
zio->io_deadline = 0;
+ zio->io_timestamp = 0;
+ zio->io_delta = 0;
+ zio->io_delay = 0;
zio->io_orig_data = zio->io_data = data;
zio->io_orig_size = zio->io_size = size;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_bp_override = NULL;
zio->io_walk_link = NULL;
zio->io_transform_stack = NULL;
- zio->io_delay = 0;
zio->io_error = 0;
zio->io_child_count = 0;
zio->io_parent_count = 0;
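The new fields give each zio a measured service time (io_delta, zeroed here, is presumed to serve hang detection elsewhere). A sketch of the assumed call sites, with both values in lbolt clock ticks; the helper names are hypothetical:

static void
zio_stamp_issue(zio_t *zio)	/* hypothetical: zio_vdev_io_start() path */
{
	zio->io_timestamp = ddi_get_lbolt64();	/* issue time, clock ticks */
}

static void
zio_stamp_done(zio_t *zio)	/* hypothetical: zio_vdev_io_done() path */
{
	/* elapsed ticks; compared against MSEC_TO_TICK(zio_delay_max) below */
	zio->io_delay = ddi_get_lbolt64() - zio->io_timestamp;
}

Measuring in ticks is what makes the MSEC_TO_TICK(zio_delay_max) comparison at the bottom of this patch line up.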
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
- dsl_pool_t *dsl;
+ dsl_pool_t *dp;
boolean_t cut;
int rv;
ASSERT(stage <= ZIO_STAGE_DONE);
- dsl = spa_get_dsl(zio->io_spa);
+ dp = spa_get_dsl(zio->io_spa);
cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
- * If we are in the txg_sync_thread or being called
- * during pool init issue async to minimize stack depth.
- * Both of these call paths may be recursively called.
- *
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
- if (((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
- zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) ||
- (dsl != NULL && dsl_pool_sync_context(dsl))) {
+ if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
+ zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
+#ifdef _KERNEL
+ /*
+ * If we are executing in the context of the tx_sync_thread,
+ * or if we are performing pool initialization outside of a
+ * zio_taskq[ZIO_TASKQ_ISSUE] context, then issue the zio
+ * async to minimize stack usage for these deep call paths.
+ */
+ if ((dp && curthread == dp->dp_tx.tx_sync_thread) ||
+ (dp && spa_is_initializing(dp->dp_spa) &&
+ !zio_taskq_member(zio, ZIO_TASKQ_ISSUE))) {
+ zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
+ return;
+ }
+#endif
+
zio->io_stage = stage;
rv = zio_pipeline[highbit(stage) - 1](zio);
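For reference, the dsl_pool_sync_context() test that the old combined condition relied on is assumed to reduce to the same curthread comparison now open-coded in the _KERNEL block above; a sketch, not necessarily the exact upstream helper:

boolean_t
dsl_pool_sync_context(dsl_pool_t *dp)
{
	/* true when called from the pool's txg sync thread */
	return (curthread == dp->dp_tx.tx_sync_thread);
}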
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
- ddp = ddt_phys_select(dde, bp);
- ddt_phys_decref(ddp);
+ if (dde) {
+ ddp = ddt_phys_select(dde, bp);
+ if (ddp)
+ ddt_phys_decref(ddp);
+ }
ddt_exit(ddt);
return (ZIO_PIPELINE_CONTINUE);
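The NULL checks guard the case where no matching entry or phys exists, e.g. on a damaged pool. A sketch of the assumed ddt_phys_select() behavior that makes ddp NULL-able:

static ddt_phys_t *
ddt_phys_select_sketch(const ddt_entry_t *dde, const blkptr_t *bp)
{
	ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys;
	int p;

	/* match the bp's first DVA and physical birth txg */
	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) &&
		    BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth)
			return (ddp);
	}
	return (NULL);	/* no phys matches: callers must tolerate NULL */
}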
vdev_stat_update(zio, zio->io_size);
/*
- * If this I/O is attached to a particular vdev is slow, exeeding
- * 30 seconds to complete, post an error described the I/O delay.
+ * If this I/O is attached to a particular vdev and is slow,
+ * exceeding 30 seconds to complete, post an error describing
+ * the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
- if (zio->io_delay >= zio_delay_max) {
+ if (zio->io_delay >= MSEC_TO_TICK(zio_delay_max)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd))
zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa,
zio->io_vd, zio, 0, 0);
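The conversion matters because zio_delay_max is a tunable expressed in milliseconds while io_delay is accumulated in clock ticks; a sketch of MSEC_TO_TICK under that assumption (the real macro comes from the platform headers):

#define	MSEC_TO_TICK(msec)	((msec) / (MILLISEC / hz))

With hz = 100 and the default zio_delay_max = 30000 ms, the threshold works out to 3000 ticks, i.e. the 30 seconds the comment above describes.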