extern int zfs_mg_alloc_failures;
/*
+ * The following actions directly affect the spa's sync-to-convergence logic.
+ * The values below define the sync pass when we start performing the action.
+ * Care should be taken when changing these values as they directly impact
+ * spa_sync() performance. Tuning these values may introduce subtle performance
+ * pathologies and should only be done in the context of performance analysis.
+ * These tunables will eventually be removed and replaced with #defines once
+ * enough analysis has been done to determine optimal values.
+ *
+ * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
+ * regular blocks are not deferred.
+ */
+int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
+int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
+int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
+
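For illustration, here is a minimal sketch (not part of this patch; the helper name is hypothetical) of how one of these tunables gates behavior during spa_sync(). spa_sync_pass() returns the current pass number starting at 1, so with the default zfs_sync_pass_dont_compress = 5, compression is disabled from pass 5 onward:

/*
 * Hypothetical helper, for illustration only: choose the effective
 * compression for a write given the current sync pass. Later passes
 * skip compression to help the pool converge.
 */
static enum zio_compress
example_sync_pass_compress(spa_t *spa, enum zio_compress requested)
{
	if (spa_sync_pass(spa) >= zfs_sync_pass_dont_compress)
		return (ZIO_COMPRESS_OFF);
	return (requested);
}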
+/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
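In zio.c this comment documents the IO_IS_ALLOCATING() macro; a plausible rendering consistent with the wording above (the exact pipeline-stage test is an assumption, as the definition is not shown in this diff):

/* Assumed form: true if the zio's original pipeline includes DVA allocation. */
#define	IO_IS_ALLOCATING(zio) \
	((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)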
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
- ASSERT(spa_sync_pass(spa) <= SYNC_PASS_DEFERRED_FREE);
+ ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
- if (pass > SYNC_PASS_DONT_COMPRESS)
+ if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
* There should only be a handful of blocks after pass 1 in any case.
*/
if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize &&
- pass > SYNC_PASS_REWRITE) {
+ pass >= zfs_sync_pass_rewrite) {
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
ASSERT(psize != 0);
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
*/
static void
-zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline)
+zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
t = ZIO_TYPE_NULL;
/*
- * If this is a high priority I/O, then use the high priority taskq.
+ * If this is a high priority I/O, then use the high priority taskq if
+ * available.
*/
if (zio->io_priority == ZIO_PRIORITY_NOW &&
- spa->spa_zio_taskq[t][q + 1] != NULL)
+ spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
- taskq_dispatch_ent(spa->spa_zio_taskq[t][q],
- (task_func_t *)zio_execute, zio, flags, &zio->io_tqent);
+ spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
+ flags, &zio->io_tqent);
}
static boolean_t
-zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
+zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
kthread_t *executor = zio->io_executor;
spa_t *spa = zio->io_spa;
zio_type_t t;
- for (t = 0; t < ZIO_TYPES; t++)
- if (taskq_member(spa->spa_zio_taskq[t][q], executor))
- return (B_TRUE);
+ for (t = 0; t < ZIO_TYPES; t++) {
+ spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
+ uint_t i;
+ for (i = 0; i < tqs->stqs_count; i++) {
+ if (taskq_member(tqs->stqs_taskq[i], executor))
+ return (B_TRUE);
+ }
+ }
return (B_FALSE);
}
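The loop above assumes each (type, queue) slot now holds a set of taskqs rather than a single taskq pointer. A sketch of the structure this implies, with field names taken from the code above (the actual definition lives outside this diff, in spa_impl.h, and is assumed here):

typedef struct spa_taskqs {
	uint_t stqs_count;	/* number of taskqs in this set */
	taskq_t **stqs_taskq;	/* array of stqs_count taskq pointers */
} spa_taskqs_t;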
}
ASSERT(BP_IS_HOLE(bp));
- ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
+ ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
- taskq_dispatch_ent(
- zio->io_spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
+ spa_taskq_dispatch_ent(zio->io_spa,
+ ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
(task_func_t *)zio_reexecute, zio, 0,
&zio->io_tqent);
}
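Both dispatch sites above funnel through spa_taskq_dispatch_ent(). A minimal sketch of what such a helper plausibly does, assuming the spa_taskqs_t layout sketched earlier (the selection policy shown is illustrative, not necessarily what spa.c implements):

void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		/* Common case: a single taskq for this (type, queue) slot. */
		tq = tqs->stqs_taskq[0];
	} else {
		/* Illustrative spread across the set; the real policy may differ. */
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}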