/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
data_alloc_arena = zio_alloc_arena;
#endif
zio_cache = kmem_cache_create("zio_cache",
- sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+ sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
zio_link_cache = kmem_cache_create("zio_link_cache",
- sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+ sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
/*
* For small buffers, we want a cache for each multiple of
if (align != 0) {
char name[36];
+ int flags = zio_bulk_flags;
+
+ /*
+ * The smallest buffers (512b) are heavily used and
+ * experience a lot of churn. The slabs allocated
+ * for them are also relatively small (32K). Thus
+ * in order to avoid expensive calls to vmalloc() we
+ * make an exception to the usual slab allocation
+ * policy and force these buffers to be kmem backed.
+ */
+ if (size == (1 << SPA_MINBLOCKSHIFT))
+ flags |= KMC_KMEM;
+
(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
- align, NULL, NULL, NULL, NULL, NULL,
- (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) |
- zio_bulk_flags);
+ align, NULL, NULL, NULL, NULL, NULL, flags);
(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
- align, NULL, NULL, NULL, NULL, data_alloc_arena,
- (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) |
- zio_bulk_flags);
+ align, NULL, NULL, NULL, NULL,
+ data_alloc_arena, flags);
}
}
zio_add_child(pio, zio);
}
+ taskq_init_ent(&zio->io_tqent);
+
return (zio);
}
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
- int flags = TQ_NOSLEEP | (cutinline ? TQ_FRONT : 0);
+ int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
- while (taskq_dispatch(spa->spa_zio_taskq[t][q],
- (task_func_t *)zio_execute, zio, flags) == 0); /* do nothing */
+ /*
+ * NB: We are assuming that the zio can only be dispatched
+ * to a single taskq at a time. It would be a grievous error
+ * to dispatch the zio to another taskq at the same time.
+ */
+ ASSERT(taskq_empty_ent(&zio->io_tqent));
+ taskq_dispatch_ent(spa->spa_zio_taskq[t][q],
+ (task_func_t *)zio_execute, zio, flags, &zio->io_tqent);
}
static boolean_t
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
- (void) taskq_dispatch(
+ ASSERT(taskq_empty_ent(&zio->io_tqent));
+ (void) taskq_dispatch_ent(
zio->io_spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
- (task_func_t *)zio_reexecute, zio, TQ_SLEEP);
+ (task_func_t *)zio_reexecute, zio, 0,
+ &zio->io_tqent);
}
return (ZIO_PIPELINE_STOP);
}