/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
*/
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
+kmem_cache_t *zio_vdev_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
int zio_bulk_flags = 0;
zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0,
    zio_cons, zio_dest, NULL, NULL, NULL, KMC_KMEM);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
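+ /*
+ * The vdev_io_t buffers are large, so this cache is page aligned and
+ * backed by vmem (KMC_VMEM) rather than the kmem slab.
+ */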
+ zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof (vdev_io_t),
+ PAGESIZE, NULL, NULL, NULL, NULL, NULL, KMC_VMEM);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE.
zio_data_buf_cache[c] = NULL;
}
+ kmem_cache_destroy(zio_vdev_cache);
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
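+ /*
+ * KM_NODEBUG quiets the SPL's warning about unusually large
+ * allocations, which these buffers can legitimately be.
+ */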
- return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
+ return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE | KM_NODEBUG));
}
/*
ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
- return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
+ return (kmem_cache_alloc(zio_data_buf_cache[c],
+ KM_PUSHPAGE | KM_NODEBUG));
}
void
}
/*
+ * Dedicated I/O buffers to ensure that memory fragmentation never prevents
+ * or significantly delays the issuing of a zio. These buffers are used
+ * to aggregate I/O and could be used for raidz stripes.
+ */
+void *
+zio_vdev_alloc(void)
+{
+ return (kmem_cache_alloc(zio_vdev_cache, KM_PUSHPAGE));
+}
+
+void
+zio_vdev_free(void *buf)
+{
+ kmem_cache_free(zio_vdev_cache, buf);
+}
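+/*
+ * A minimal usage sketch (hypothetical caller, shown for illustration):
+ * an aggregating caller takes a pre-sized buffer from the dedicated
+ * cache instead of the general-purpose allocator, so a fragmented heap
+ * cannot stall the zio:
+ *
+ *	void *buf = zio_vdev_alloc();
+ *	... aggregate up to sizeof (vdev_io_t) bytes into buf ...
+ *	zio_vdev_free(buf);
+ */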
+
+/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
ASSERT(txg > spa_syncing_txg(spa));
- if (use_slog)
+ /*
+ * ZIL blocks are always contiguous (i.e. not gang blocks) so we
+ * set the METASLAB_GANG_AVOID flag so that they don't "fast gang"
+ * when allocating them.
+ */
+ if (use_slog) {
error = metaslab_alloc(spa, spa_log_class(spa), size,
- new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);
+ new_bp, 1, txg, old_bp,
+ METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
+ }
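+ /* Fall back to the normal class if the slog allocation failed. */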
- if (error)
+ if (error) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
- new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);
+ new_bp, 1, txg, old_bp,
+ METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
+ }
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
align = 1ULL << vd->vdev_top->vdev_ashift;
- if (P2PHASE(zio->io_size, align) != 0) {
+ /*
+ * On Linux, we don't care about read alignment. The backing block
+ * device driver will take care of that for us.
+ * The only exception is raidz, which needs a full block for parity.
+ */
+ if (P2PHASE(zio->io_size, align) != 0 &&
+ (zio->io_type != ZIO_TYPE_READ ||
+ vd->vdev_ops == &vdev_raidz_ops)) {
uint64_t asize = P2ROUNDUP(zio->io_size, align);
char *abuf = zio_buf_alloc(asize);
- ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
bcopy(zio->io_data, abuf, zio->io_size);
bzero(abuf + zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
+ ASSERT(P2PHASE(zio->io_size, align) == 0);
}
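+ /*
+ * Worked example with hypothetical values: with vdev_ashift = 12,
+ * align is 4096.  A 6144-byte zio has P2PHASE(6144, 4096) = 2048, so
+ * the transform above fires and pads the I/O to
+ * P2ROUNDUP(6144, 4096) = 8192, restoring size alignment.
+ */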
ASSERT(P2PHASE(zio->io_offset, align) == 0);
- ASSERT(P2PHASE(zio->io_size, align) == 0);
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*