*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
*/
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
+kmem_cache_t *zio_vdev_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
int zio_bulk_flags = 0;
extern int zfs_mg_alloc_failures;
/*
+ * The following actions directly affect the spa's sync-to-convergence logic.
+ * The values below define the sync pass when we start performing the action.
+ * Care should be taken when changing these values as they directly impact
+ * spa_sync() performance. Tuning these values may introduce subtle performance
+ * pathologies and should only be done in the context of performance analysis.
+ * These tunables will eventually be removed and replaced with #defines once
+ * enough analysis has been done to determine optimal values.
+ *
+ * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
+ * regular blocks are not deferred.
+ */
+int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
+int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
+int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
+
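/*
 * A minimal sketch (hypothetical helper, for illustration only) of how
 * one of these tunables gates behavior during sync; zio_write_bp_init()
 * performs the equivalent zfs_sync_pass_dont_compress check inline:
 */
static inline boolean_t
zio_sync_pass_allows_compress(spa_t *spa)
{
	/* Compression is turned off once the cutoff pass is reached. */
	return (spa_sync_pass(spa) < zfs_sync_pass_dont_compress);
}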
+/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
zio_cons, zio_dest, NULL, NULL, NULL, KMC_KMEM);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
+ zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof (vdev_io_t),
+ PAGESIZE, NULL, NULL, NULL, NULL, NULL, KMC_VMEM);
/*
* For small buffers, we want a cache for each multiple of
zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8);
zio_inject_init();
+
+ lz4_init();
}
void
zio_data_buf_cache[c] = NULL;
}
+ kmem_cache_destroy(zio_vdev_cache);
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
+
+ lz4_fini();
}
/*
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
- return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
+ return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE | KM_NODEBUG));
}
/*
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
- return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
+ return (kmem_cache_alloc(zio_data_buf_cache[c],
+ KM_PUSHPAGE | KM_NODEBUG));
}
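/*
 * For illustration (hypothetical helper; zio_buf_alloc() and
 * zio_data_buf_alloc() compute this index inline): buffer sizes map to
 * cache slots at SPA_MINBLOCKSIZE (512-byte) granularity, so sizes
 * 1..512 share slot 0, 513..1024 share slot 1, and so on up to
 * SPA_MAXBLOCKSIZE.
 */
static inline size_t
zio_buf_cache_index(size_t size)
{
	return ((size - 1) >> SPA_MINBLOCKSHIFT);
}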
void
}
/*
+ * Dedicated I/O buffers to ensure that memory fragmentation never prevents
+ * or significantly delays the issuing of a zio. These buffers are used
+ * to aggregate I/O and could be used for raidz stripes.
+ */
+void *
+zio_vdev_alloc(void)
+{
+ return (kmem_cache_alloc(zio_vdev_cache, KM_PUSHPAGE));
+}
+
+void
+zio_vdev_free(void *buf)
+{
+ kmem_cache_free(zio_vdev_cache, buf);
+}
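/*
 * Hypothetical usage sketch: a vdev-layer consumer takes one fixed-size
 * buffer for the life of an aggregated I/O and returns it on completion.
 * No size argument is needed because every object in zio_vdev_cache is
 * sizeof (vdev_io_t) bytes, PAGESIZE aligned.
 */
static void
example_vdev_io(void)
{
	void *buf = zio_vdev_alloc();
	/* ... aggregate child I/O into buf and issue it ... */
	zio_vdev_free(buf);
}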
+
+/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
zio->io_vsd_ops = NULL;
zio->io_offset = offset;
zio->io_deadline = 0;
+ zio->io_timestamp = 0;
+ zio->io_delta = 0;
+ zio->io_delay = 0;
zio->io_orig_data = zio->io_data = data;
zio->io_orig_size = zio->io_size = size;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_bp_override = NULL;
zio->io_walk_link = NULL;
zio->io_transform_stack = NULL;
- zio->io_delay = 0;
zio->io_error = 0;
zio->io_child_count = 0;
zio->io_parent_count = 0;
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
- zp->zp_type < DMU_OT_NUMTYPES &&
+ DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa) &&
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
- ASSERT(spa_sync_pass(spa) <= SYNC_PASS_DEFERRED_FREE);
+ ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
+
+ arc_freed(spa, bp);
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
}
- if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0)
+ if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
- if (pass > SYNC_PASS_DONT_COMPRESS)
+ if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
* There should only be a handful of blocks after pass 1 in any case.
*/
if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize &&
- pass > SYNC_PASS_REWRITE) {
+ pass >= zfs_sync_pass_rewrite) {
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
ASSERT(psize != 0);
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
*/
static void
-zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline)
+zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
t = ZIO_TYPE_NULL;
/*
- * If this is a high priority I/O, then use the high priority taskq.
+ * If this is a high priority I/O, then use the high priority taskq if
+ * available.
*/
if (zio->io_priority == ZIO_PRIORITY_NOW &&
- spa->spa_zio_taskq[t][q + 1] != NULL)
+ spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
- taskq_dispatch_ent(spa->spa_zio_taskq[t][q],
- (task_func_t *)zio_execute, zio, flags, &zio->io_tqent);
+ spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
+ flags, &zio->io_tqent);
}
static boolean_t
-zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
+zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
kthread_t *executor = zio->io_executor;
spa_t *spa = zio->io_spa;
zio_type_t t;
- for (t = 0; t < ZIO_TYPES; t++)
- if (taskq_member(spa->spa_zio_taskq[t][q], executor))
- return (B_TRUE);
+ for (t = 0; t < ZIO_TYPES; t++) {
+ spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
+ uint_t i;
+ for (i = 0; i < tqs->stqs_count; i++) {
+ if (taskq_member(tqs->stqs_taskq[i], executor))
+ return (B_TRUE);
+ }
+ }
return (B_FALSE);
}
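/*
 * For reference, the loop above assumes a spa_taskqs_t of roughly this
 * shape (defined in spa_impl.h): a counted array of taskq pointers, so
 * a single (type, queue) pair may be served by several taskqs.
 */
typedef struct spa_taskqs {
	uint_t stqs_count;	/* number of taskqs in stqs_taskq */
	taskq_t **stqs_taskq;	/* array of taskq pointers */
} spa_taskqs_t;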
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
- * there's no CPU work; it never burns a thread in cv_wait().
+ * there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
- dsl_pool_t *dsl;
+ dsl_pool_t *dp;
boolean_t cut;
int rv;
ASSERT(stage <= ZIO_STAGE_DONE);
- dsl = spa_get_dsl(zio->io_spa);
+ dp = spa_get_dsl(zio->io_spa);
cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
- * If we are in the txg_sync_thread or being called
- * during pool init issue async to minimize stack depth.
- * Both of these call paths may be recursively called.
- *
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
- if (((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
- zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) ||
- (dsl != NULL && dsl_pool_sync_context(dsl))) {
+ if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
+ zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
+#ifdef _KERNEL
+ /*
+ * If we are executing in the context of the tx_sync_thread,
+ * or if we are performing pool initialization outside of a
+ * zio_taskq[ZIO_TASKQ_ISSUE] context, then issue the zio
+ * async to minimize stack usage for these deep call paths.
+ */
+ if ((dp && curthread == dp->dp_tx.tx_sync_thread) ||
+ (dp && spa_is_initializing(dp->dp_spa) &&
+ !zio_taskq_member(zio, ZIO_TASKQ_ISSUE))) {
+ zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
+ return;
+ }
+#endif
+
zio->io_stage = stage;
rv = zio_pipeline[highbit(stage) - 1](zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL)
- cv_wait(&zio->io_cv, &zio->io_lock);
+ cv_wait_io(&zio->io_cv, &zio->io_lock);
mutex_exit(&zio->io_lock);
error = zio->io_error;
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
+ cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
+ "failure and has been suspended.\n", spa_name(spa));
+
zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);
mutex_enter(&spa->spa_suspend_lock);
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
+ /*
+ * We didn't allocate this bp, so make sure it doesn't get unmarked.
+ */
+ pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
+
zio_nowait(zio);
return (ZIO_PIPELINE_CONTINUE);
ddt_exit(ddt);
- error = arc_read_nolock(NULL, spa, &blk,
+ error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
- ddp = ddt_phys_select(dde, bp);
- ddt_phys_decref(ddp);
+ if (dde) {
+ ddp = ddt_phys_select(dde, bp);
+ if (ddp)
+ ddt_phys_decref(ddp);
+ }
ddt_exit(ddt);
return (ZIO_PIPELINE_CONTINUE);
}
ASSERT(BP_IS_HOLE(bp));
- ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
+ ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0;
flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ?
METASLAB_GANG_CHILD : 0;
+ flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags);
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
-zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
- uint64_t size, boolean_t use_slog)
+zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, uint64_t size,
+ boolean_t use_slog)
{
int error = 1;
ASSERT(txg > spa_syncing_txg(spa));
- if (use_slog)
+ /*
+ * ZIL blocks are always contiguous (i.e. not gang blocks) so we
+ * set the METASLAB_GANG_AVOID flag so that they don't "fast gang"
+ * when allocating them.
+ */
+ if (use_slog) {
error = metaslab_alloc(spa, spa_log_class(spa), size,
- new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);
+ new_bp, 1, txg, NULL,
+ METASLAB_FASTWRITE | METASLAB_GANG_AVOID);
+ }
- if (error)
+ if (error) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
- new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);
+ new_bp, 1, txg, NULL,
+ METASLAB_FASTWRITE | METASLAB_GANG_AVOID);
+ }
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
vdev_stat_update(zio, zio->io_size);
/*
- * If this I/O is attached to a particular vdev is slow, exeeding
- * 30 seconds to complete, post an error described the I/O delay.
+ * If this I/O is attached to a particular vdev and is slow, exceeding
+ * 30 seconds to complete, post an error describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
- if (zio->io_delay >= zio_delay_max) {
+ if (zio->io_delay >= MSEC_TO_TICK(zio_delay_max)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd))
zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa,
zio->io_vd, zio, 0, 0);
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
- (void) taskq_dispatch_ent(
- zio->io_spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
+ spa_taskq_dispatch_ent(zio->io_spa,
+ ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
(task_func_t *)zio_reexecute, zio, 0,
&zio->io_tqent);
}
zfs_ereport_free_checksum(zcr);
}
+ if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
+ !BP_IS_HOLE(zio->io_bp)) {
+ metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
+ }
+
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
zio_done
};
+/* dnp is the dnode for zb1->zb_object */
+boolean_t
+zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_t *zb1,
+ const zbookmark_t *zb2)
+{
+ uint64_t zb1nextL0, zb2thisobj;
+
+ ASSERT(zb1->zb_objset == zb2->zb_objset);
+ ASSERT(zb2->zb_level == 0);
+
+ /*
+ * A bookmark in the deadlist is considered to be after
+ * everything else.
+ */
+ if (zb2->zb_object == DMU_DEADLIST_OBJECT)
+ return (B_TRUE);
+
+ /* The objset_phys_t isn't before anything. */
+ if (dnp == NULL)
+ return (B_FALSE);
+
+ zb1nextL0 = (zb1->zb_blkid + 1) <<
+ ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));
+
+ zb2thisobj = zb2->zb_object ? zb2->zb_object :
+ zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);
+
+ if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
+ uint64_t nextobj = zb1nextL0 *
+ (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
+ return (nextobj <= zb2thisobj);
+ }
+
+ if (zb1->zb_object < zb2thisobj)
+ return (B_TRUE);
+ if (zb1->zb_object > zb2thisobj)
+ return (B_FALSE);
+ if (zb2->zb_object == DMU_META_DNODE_OBJECT)
+ return (B_FALSE);
+ return (zb1nextL0 <= zb2->zb_blkid);
+}
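/*
 * Worked example with hypothetical values: two level-0 bookmarks in the
 * same object. zb1nextL0 is (4 + 1) << 0 == 5 and zb2thisobj equals the
 * shared object number, so the final comparison is 5 <= 5 and zb1 is
 * considered to be at or before zb2.
 */
static void
example_zbookmark_order(const dnode_phys_t *dnp)
{
	zbookmark_t zb1 = { 1, 42, 0, 4 };	/* objset 1, object 42, L0, blkid 4 */
	zbookmark_t zb2 = { 1, 42, 0, 5 };	/* objset 1, object 42, L0, blkid 5 */

	ASSERT(zbookmark_is_before(dnp, &zb1, &zb2));
}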
+
#if defined(_KERNEL) && defined(HAVE_SPL)
/* Fault injection */
EXPORT_SYMBOL(zio_injection_enabled);
module_param(zio_requeue_io_start_cut_in_line, int, 0644);
MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O");
+
+module_param(zfs_sync_pass_deferred_free, int, 0644);
+MODULE_PARM_DESC(zfs_sync_pass_deferred_free,
+ "defer frees starting in this pass");
+
+module_param(zfs_sync_pass_dont_compress, int, 0644);
+MODULE_PARM_DESC(zfs_sync_pass_dont_compress,
+ "don't compress starting in this pass");
+
+module_param(zfs_sync_pass_rewrite, int, 0644);
+MODULE_PARM_DESC(zfs_sync_pass_rewrite,
+ "rewrite new bps starting in this pass");
#endif