/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>

/*
 * These tunables are for performance analysis.
 */

/*
 * zfs_vdev_max_pending is the maximum number of i/os concurrently
 * pending to each device.  zfs_vdev_min_pending is the initial number
 * of i/os pending to each device (before it starts ramping up to
 * max_pending).
 */
int zfs_vdev_max_pending = 10;
int zfs_vdev_min_pending = 4;
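
/*
 * A hedged sketch of the ramp-up these two tunables imply (numbers are the
 * defaults above, not from any additional source): a vdev starts with at
 * most zfs_vdev_min_pending = 4 i/os in flight, and each completion may
 * issue up to zfs_vdev_ramp_rate more (see vdev_queue_io_done() below), so
 * the in-flight count grows roughly geometrically, 4 -> 8 -> 10, until it
 * is capped at zfs_vdev_max_pending = 10.
 */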

/* deadline = (ddi_get_lbolt64() >> time_shift) + pri */
int zfs_vdev_time_shift = 6;
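
/*
 * Worked example of the deadline above (illustrative timestamps, not from
 * the original source): with zfs_vdev_time_shift = 6, lbolt64 values are
 * bucketed 64 ticks at a time, so i/os queued at ticks 1000 and 1010 with
 * priority 0 both get deadline (1000 >> 6) + 0 == (1010 >> 6) + 0 == 15 and
 * are then ordered by offset (see vdev_queue_deadline_compare() below),
 * while an i/o queued at tick 1100 lands in a later bucket, deadline 17.
 */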

/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;
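
/*
 * A worked example of the gap limits (illustrative offsets, not from the
 * original source): two 4K reads at offsets 0 and 20K leave a 16K hole,
 * which is within the 32K (32 << 10) read gap limit, so they may be merged
 * into a single 24K read spanning [0, 24K) whose unwanted middle is read
 * but never copied to a parent.  Writes use a gap limit of 0 and instead
 * rely on spans of optional i/os and the 4K (4 << 10) write gap limit
 * handled in vdev_queue_io_to_issue() below.
 */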

/*
 * Virtual device vector for disk I/O scheduling.
 */
int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_deadline < z2->io_deadline)
		return (-1);
	if (z1->io_deadline > z2->io_deadline)
		return (1);

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	int i;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
	    sizeof (zio_t), offsetof(struct zio, io_deadline_node));

	avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	/*
	 * A list of buffers which can be used for aggregate I/O; this
	 * avoids the need to allocate them on demand when memory is low.
	 */
	list_create(&vq->vq_io_list, sizeof (vdev_io_t),
	    offsetof(vdev_io_t, vi_node));

	for (i = 0; i < zfs_vdev_max_pending; i++)
		list_insert_tail(&vq->vq_io_list, zio_vdev_alloc());
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	vdev_io_t *vi;

	avl_destroy(&vq->vq_deadline_tree);
	avl_destroy(&vq->vq_read_tree);
	avl_destroy(&vq->vq_write_tree);
	avl_destroy(&vq->vq_pending_tree);

	while ((vi = list_head(&vq->vq_io_list)) != NULL) {
		list_remove(&vq->vq_io_list, vi);
		zio_vdev_free(vi);
	}

	list_destroy(&vq->vq_io_list);

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	avl_add(&vq->vq_deadline_tree, zio);
	avl_add(zio->io_vdev_tree, zio);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	avl_remove(&vq->vq_deadline_tree, zio);
	avl_remove(zio->io_vdev_tree, zio);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	vdev_queue_t *vq = &aio->io_vd->vdev_queue;
	vdev_io_t *vi = aio->io_data;
	zio_t *pio;

	while ((pio = zio_walk_parents(aio)) != NULL)
		if (aio->io_type == ZIO_TYPE_READ)
			bcopy((char *)aio->io_data + (pio->io_offset -
			    aio->io_offset), pio->io_data, pio->io_size);

	mutex_enter(&vq->vq_lock);
	list_insert_tail(&vq->vq_io_list, vi);
	mutex_exit(&vq->vq_lock);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus the start of the first
 * (fio->io_offset).  Conveniently, the gap between fio and lio is given by
 * -IO_SPAN(lio, fio); thus fio and lio are adjacent if and only if
 * IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
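
/*
 * Worked example of the macros above (illustrative offsets, not from the
 * original source): for fio covering [0, 4K) and lio covering [8K, 12K),
 * IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, the size an aggregate covering
 * both would need, and IO_GAP(fio, lio) = -(0 + 4K - 8K) = 4K, the hole
 * between the end of fio and the start of lio.
 */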

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
	zio_t *fio, *lio, *aio, *dio, *nio, *mio;
	avl_tree_t *t;
	vdev_io_t *vi;
	int flags;
	uint64_t maxspan = MIN(zfs_vdev_aggregation_limit, SPA_MAXBLOCKSIZE);
	uint64_t maxgap;
	int stretch;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
	    avl_numnodes(&vq->vq_deadline_tree) == 0)
		return (NULL);

	fio = lio = avl_first(&vq->vq_deadline_tree);

	t = fio->io_vdev_tree;
	flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
	maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

	vi = list_head(&vq->vq_io_list);
	if (vi == NULL) {
		vi = zio_vdev_alloc();
		list_insert_head(&vq->vq_io_list, vi);
	}

	if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
		/*
		 * We can aggregate I/Os that are sufficiently adjacent and of
		 * the same flavor, as expressed by the AGG_INHERIT flags.
		 * The latter requirement is necessary so that certain
		 * attributes of the I/O, such as whether it's a normal I/O
		 * or a scrub/resilver, can be preserved in the aggregate.
		 * We can include optional I/Os, but don't allow them
		 * to begin a range as they add no benefit in that situation.
		 */

		/*
		 * We keep track of the last non-optional I/O.
		 */
		mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

		/*
		 * Walk backwards through sufficiently contiguous I/Os
		 * recording the last non-optional I/O.
		 */
		while ((dio = AVL_PREV(t, fio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(dio, lio) <= maxspan &&
		    IO_GAP(dio, fio) <= maxgap) {
			fio = dio;
			if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = fio;
		}

		/*
		 * Skip any initial optional I/Os.
		 */
		while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
			fio = AVL_NEXT(t, fio);
			ASSERT(fio != NULL);
		}

		/*
		 * Walk forward through sufficiently contiguous I/Os.
		 */
		while ((dio = AVL_NEXT(t, lio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(fio, dio) <= maxspan &&
		    IO_GAP(lio, dio) <= maxgap) {
			lio = dio;
			if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = lio;
		}

		/*
		 * Now that we've established the range of the I/O aggregation
		 * we must decide what to do with trailing optional I/Os.
		 * For reads, there's nothing to do.  While we are unable to
		 * aggregate further, it's possible that a trailing optional
		 * I/O would allow the underlying device to aggregate with
		 * subsequent I/Os.  We must therefore determine if the next
		 * non-optional I/O is close enough to make aggregation
		 * worthwhile.
		 */
		stretch = B_FALSE;
		if (t != &vq->vq_read_tree && mio != NULL) {
			nio = lio;
			while ((dio = AVL_NEXT(t, nio)) != NULL &&
			    IO_GAP(nio, dio) == 0 &&
			    IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
				nio = dio;
				if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
					stretch = B_TRUE;
					break;
				}
			}
		}

		if (stretch) {
			/* This may be a no-op. */
			VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
			dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
		} else {
			while (lio != mio && lio != fio) {
				ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
				lio = AVL_PREV(t, lio);
				ASSERT(lio != NULL);
			}
		}
	}
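
	/*
	 * Worked example of the stretch decision above (illustrative offsets
	 * and sizes, not from the original source): suppose the forward walk
	 * stopped (e.g. at maxspan) with mio covering [12K, 16K), lio an
	 * optional write covering [16K, 20K), and the next i/o a non-optional
	 * write starting at 20K.  Then IO_GAP(lio, dio) == 0 and
	 * IO_GAP(mio, dio) = 20K - 16K = 4K <= zfs_vdev_write_gap_limit, so
	 * stretch is taken and the trailing optional write is kept, giving
	 * the disk one contiguous span; otherwise the range is trimmed back
	 * to the last non-optional i/o.
	 */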

	if (fio != lio) {
		uint64_t size = IO_SPAN(fio, lio);
		ASSERT(size <= maxspan);
		ASSERT(vi != NULL);

		aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
		    vi, size, fio->io_type, ZIO_PRIORITY_AGG,
		    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
		    vdev_queue_agg_io_done, NULL);
		aio->io_timestamp = fio->io_timestamp;

		nio = fio;
		do {
			dio = nio;
			nio = AVL_NEXT(t, dio);
			ASSERT(dio->io_type == aio->io_type);
			ASSERT(dio->io_vdev_tree == t);

			if (dio->io_flags & ZIO_FLAG_NODATA) {
				ASSERT(dio->io_type == ZIO_TYPE_WRITE);
				bzero((char *)aio->io_data + (dio->io_offset -
				    aio->io_offset), dio->io_size);
			} else if (dio->io_type == ZIO_TYPE_WRITE) {
				bcopy(dio->io_data, (char *)aio->io_data +
				    (dio->io_offset - aio->io_offset),
				    dio->io_size);
			}

			zio_add_child(dio, aio);
			vdev_queue_io_remove(vq, dio);
			zio_vdev_io_bypass(dio);
			zio_execute(dio);
		} while (dio != lio);

		avl_add(&vq->vq_pending_tree, aio);
		list_remove(&vq->vq_io_list, vi);

		return (aio);
	}

	ASSERT(fio->io_vdev_tree == t);
	vdev_queue_io_remove(vq, fio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it.  We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (fio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(fio);
		zio_execute(fio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	avl_add(&vq->vq_pending_tree, fio);

	return (fio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	if (zio->io_type == ZIO_TYPE_READ)
		zio->io_vdev_tree = &vq->vq_read_tree;
	else
		zio->io_vdev_tree = &vq->vq_write_tree;

	mutex_enter(&vq->vq_lock);

	zio->io_timestamp = ddi_get_lbolt64();
	zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
	    zio->io_priority;

	vdev_queue_io_add(vq, zio);

	nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	int i;

	if (zio_injection_enabled)
		delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

	mutex_enter(&vq->vq_lock);

	avl_remove(&vq->vq_pending_tree, zio);

	zio->io_delta = ddi_get_lbolt64() - zio->io_timestamp;
	vq->vq_io_complete_ts = ddi_get_lbolt64();
	vq->vq_io_delta_ts = vq->vq_io_complete_ts - zio->io_timestamp;

	for (i = 0; i < zfs_vdev_ramp_rate; i++) {
		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
		if (nio == NULL)
			break;
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_max_pending, int, 0644);
MODULE_PARM_DESC(zfs_vdev_max_pending, "Max pending per-vdev I/Os");

module_param(zfs_vdev_min_pending, int, 0644);
MODULE_PARM_DESC(zfs_vdev_min_pending, "Min pending per-vdev I/Os");

module_param(zfs_vdev_aggregation_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Max vdev I/O aggregation size");

module_param(zfs_vdev_time_shift, int, 0644);
MODULE_PARM_DESC(zfs_vdev_time_shift, "Deadline time shift for vdev I/O");

module_param(zfs_vdev_ramp_rate, int, 0644);
MODULE_PARM_DESC(zfs_vdev_ramp_rate, "Exponential I/O issue ramp-up rate");

module_param(zfs_vdev_read_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");

module_param(zfs_vdev_write_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_write_gap_limit, "Aggregate write I/O over gap");
#endif