/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>

/*
 * These tunables are for performance analysis.
 */
/*
 * zfs_vdev_max_pending is the maximum number of i/os concurrently
 * pending to each device.  zfs_vdev_min_pending is the initial number
 * of i/os pending to each device (before it starts ramping up to
 * max_pending).
 */
int zfs_vdev_max_pending = 10;
int zfs_vdev_min_pending = 4;

/* deadline = pri + (ddi_get_lbolt64() >> time_shift) */
int zfs_vdev_time_shift = 6;

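/*
 * Illustrative note (assuming a hypothetical hz of 100 lbolt ticks per
 * second, not stated here): with zfs_vdev_time_shift = 6, the clock
 * term of the deadline advances once every 64 ticks (~0.64 s).  All
 * I/Os queued within one such window share that term, so io_priority
 * breaks the tie; a low-priority I/O can be passed by newly arriving
 * higher-priority I/Os for only a bounded number of windows before its
 * deadline becomes the smallest, which prevents starvation.
 */
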
/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;

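/*
 * Worked example with the defaults above: a newly busy vdev issues
 * I/Os only while fewer than zfs_vdev_min_pending (4) are in flight
 * (see vdev_queue_io() below); thereafter each completion may issue up
 * to zfs_vdev_ramp_rate (2) replacements (see vdev_queue_io_done()),
 * growing the in-flight count until it is capped at
 * zfs_vdev_max_pending (10).
 */
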
/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

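/*
 * Illustrative example (hypothetical offsets, not a tuning
 * recommendation): with the 32K read gap limit above, two 16K reads
 * separated by a 24K hole may be combined into a single 56K read; the
 * hole is read into the aggregate buffer and simply never copied out
 * to any parent I/O (see vdev_queue_agg_io_done()).
 */
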
/*
 * Virtual device vector for disk I/O scheduling.
 */
int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_deadline < z2->io_deadline)
                return (-1);
        if (z1->io_deadline > z2->io_deadline)
                return (1);

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;
        int i;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

        avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
            sizeof (zio_t), offsetof(struct zio, io_deadline_node));

        avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        /*
         * A list of buffers which can be used for aggregate I/O; this
         * avoids the need to allocate them on demand when memory is low.
         */
        list_create(&vq->vq_io_list, sizeof (vdev_io_t),
            offsetof(vdev_io_t, vi_node));

        for (i = 0; i < zfs_vdev_max_pending; i++)
                list_insert_tail(&vq->vq_io_list, zio_vdev_alloc());
}

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;
        vdev_io_t *vi;

        avl_destroy(&vq->vq_deadline_tree);
        avl_destroy(&vq->vq_read_tree);
        avl_destroy(&vq->vq_write_tree);
        avl_destroy(&vq->vq_pending_tree);

        while ((vi = list_head(&vq->vq_io_list)) != NULL) {
                list_remove(&vq->vq_io_list, vi);
                zio_vdev_free(vi);
        }

        list_destroy(&vq->vq_io_list);

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        avl_add(&vq->vq_deadline_tree, zio);
        avl_add(zio->io_vdev_tree, zio);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        avl_remove(&vq->vq_deadline_tree, zio);
        avl_remove(zio->io_vdev_tree, zio);
}

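/*
 * Done callback for an aggregate I/O: for reads, scatter each parent's
 * slice of the shared aggregate buffer back to that parent, then return
 * the buffer to the queue's free list for reuse.
 */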
static void
vdev_queue_agg_io_done(zio_t *aio)
{
        vdev_queue_t *vq = &aio->io_vd->vdev_queue;
        vdev_io_t *vi = aio->io_data;
        zio_t *pio;

        while ((pio = zio_walk_parents(aio)) != NULL)
                if (aio->io_type == ZIO_TYPE_READ)
                        bcopy((char *)aio->io_data + (pio->io_offset -
                            aio->io_offset), pio->io_data, pio->io_size);

        mutex_enter(&vq->vq_lock);
        list_insert_tail(&vq->vq_io_list, vi);
        mutex_exit(&vq->vq_lock);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus the start of the first
 * (fio->io_offset).  Conveniently, the gap between fio and lio is given by
 * -IO_SPAN(lio, fio); thus fio and lio are adjacent if and only if
 * IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))

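/*
 * Worked example (hypothetical offsets): if fio covers [0, 4K) and lio
 * covers [8K, 12K), then IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, and
 * IO_GAP(fio, lio) = -(0 + 4K - 8K) = 4K, the size of the hole between
 * them.
 */
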
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
        zio_t *fio, *lio, *aio, *dio, *nio, *mio;
        avl_tree_t *t;
        vdev_io_t *vi;
        int flags;
        uint64_t maxspan = zfs_vdev_aggregation_limit;
        uint64_t maxgap;
        int stretch;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
            avl_numnodes(&vq->vq_deadline_tree) == 0)
                return (NULL);

        fio = lio = avl_first(&vq->vq_deadline_tree);

        t = fio->io_vdev_tree;
        flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
        maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

        vi = list_head(&vq->vq_io_list);
        if (vi == NULL) {
                vi = zio_vdev_alloc();
                list_insert_head(&vq->vq_io_list, vi);
        }

        if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
                /*
                 * We can aggregate I/Os that are sufficiently adjacent and of
                 * the same flavor, as expressed by the AGG_INHERIT flags.
                 * The latter requirement is necessary so that certain
                 * attributes of the I/O, such as whether it's a normal I/O
                 * or a scrub/resilver, can be preserved in the aggregate.
                 * We can include optional I/Os, but don't allow them
                 * to begin a range as they add no benefit in that situation.
                 */

                /*
                 * We keep track of the last non-optional I/O.
                 */
                mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

                /*
                 * Walk backwards through sufficiently contiguous I/Os
                 * recording the last non-optional I/O.
                 */
                while ((dio = AVL_PREV(t, fio)) != NULL &&
                    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
                    IO_SPAN(dio, lio) <= maxspan &&
                    IO_GAP(dio, fio) <= maxgap) {
                        fio = dio;
                        if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
                                mio = fio;
                }

                /*
                 * Skip any initial optional I/Os.
                 */
                while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
                        fio = AVL_NEXT(t, fio);
                        ASSERT(fio != NULL);
                }

                /*
                 * Walk forward through sufficiently contiguous I/Os.
                 */
                while ((dio = AVL_NEXT(t, lio)) != NULL &&
                    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
                    IO_SPAN(fio, dio) <= maxspan &&
                    IO_GAP(lio, dio) <= maxgap) {
                        lio = dio;
                        if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
                                mio = lio;
                }

                /*
                 * Now that we've established the range of the I/O aggregation
                 * we must decide what to do with trailing optional I/Os.
                 * For reads, there's nothing to do. While we are unable to
                 * aggregate further, it's possible that a trailing optional
                 * I/O would allow the underlying device to aggregate with
                 * subsequent I/Os. We must therefore determine if the next
                 * non-optional I/O is close enough to make aggregation
                 * worthwhile.
                 */
                stretch = B_FALSE;
                if (t != &vq->vq_read_tree && mio != NULL) {
                        nio = lio;
                        while ((dio = AVL_NEXT(t, nio)) != NULL &&
                            IO_GAP(nio, dio) == 0 &&
                            IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
                                nio = dio;
                                if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                        stretch = B_TRUE;
                                        break;
                                }
                        }
                }

                if (stretch) {
                        /* This may be a no-op. */
                        VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
                        dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
                } else {
                        while (lio != mio && lio != fio) {
                                ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
                                lio = AVL_PREV(t, lio);
                                ASSERT(lio != NULL);
                        }
                }
        }

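        /*
         * At this point [fio, lio] delimits the run to aggregate: either
         * the run was stretched forward to reach the next non-optional
         * write, or any trailing optional I/Os were trimmed back to the
         * last non-optional one.
         */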
        if (fio != lio) {
                uint64_t size = IO_SPAN(fio, lio);
                ASSERT(size <= zfs_vdev_aggregation_limit);

                aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
                    vi, size, fio->io_type, ZIO_PRIORITY_AGG,
                    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
                    vdev_queue_agg_io_done, NULL);

                nio = fio;
                do {
                        dio = nio;
                        nio = AVL_NEXT(t, dio);
                        ASSERT(dio->io_type == aio->io_type);
                        ASSERT(dio->io_vdev_tree == t);

                        if (dio->io_flags & ZIO_FLAG_NODATA) {
                                ASSERT(dio->io_type == ZIO_TYPE_WRITE);
                                bzero((char *)aio->io_data + (dio->io_offset -
                                    aio->io_offset), dio->io_size);
                        } else if (dio->io_type == ZIO_TYPE_WRITE) {
                                bcopy(dio->io_data, (char *)aio->io_data +
                                    (dio->io_offset - aio->io_offset),
                                    dio->io_size);
                        }

                        zio_add_child(dio, aio);
                        vdev_queue_io_remove(vq, dio);
                        zio_vdev_io_bypass(dio);
                        zio_execute(dio);
                } while (dio != lio);

                avl_add(&vq->vq_pending_tree, aio);
                list_remove(&vq->vq_io_list, vi);

                return (aio);
        }

        ASSERT(fio->io_vdev_tree == t);
        vdev_queue_io_remove(vq, fio);

        /*
         * If the I/O is or was optional and therefore has no data, we need to
         * simply discard it. We need to drop the vdev queue's lock to avoid a
         * deadlock that we could encounter since this I/O will complete
         * immediately.
         */
        if (fio->io_flags & ZIO_FLAG_NODATA) {
                mutex_exit(&vq->vq_lock);
                zio_vdev_io_bypass(fio);
                zio_execute(fio);
                mutex_enter(&vq->vq_lock);
                goto again;
        }

        avl_add(&vq->vq_pending_tree, fio);

        return (fio);
}

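/*
 * Queue an incoming read or write and return the next I/O to issue to
 * the device, or NULL if the caller's I/O was absorbed (queued for
 * later, or issued as part of an aggregate).
 */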
zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

        if (zio->io_type == ZIO_TYPE_READ)
                zio->io_vdev_tree = &vq->vq_read_tree;
        else
                zio->io_vdev_tree = &vq->vq_write_tree;

        mutex_enter(&vq->vq_lock);

        zio->io_deadline = (ddi_get_lbolt64() >> zfs_vdev_time_shift) +
            zio->io_priority;

        vdev_queue_io_add(vq, zio);

        nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}

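/*
 * Called when an I/O completes: retire it from the pending tree and
 * issue up to zfs_vdev_ramp_rate replacement I/Os, which is what ramps
 * the per-vdev queue depth from min_pending toward max_pending.
 */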
void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        int i;

        mutex_enter(&vq->vq_lock);

        avl_remove(&vq->vq_pending_tree, zio);

        for (i = 0; i < zfs_vdev_ramp_rate; i++) {
                zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
                if (nio == NULL)
                        break;
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_max_pending, int, 0644);
MODULE_PARM_DESC(zfs_vdev_max_pending, "Max pending per-vdev I/Os");

module_param(zfs_vdev_min_pending, int, 0644);
MODULE_PARM_DESC(zfs_vdev_min_pending, "Min pending per-vdev I/Os");

module_param(zfs_vdev_aggregation_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Max vdev I/O aggregation size");

module_param(zfs_vdev_time_shift, int, 0644);
MODULE_PARM_DESC(zfs_vdev_time_shift, "Deadline time shift for vdev I/O");

module_param(zfs_vdev_ramp_rate, int, 0644);
MODULE_PARM_DESC(zfs_vdev_ramp_rate, "Exponential I/O issue ramp-up rate");

module_param(zfs_vdev_read_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");

module_param(zfs_vdev_write_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_write_gap_limit, "Aggregate write I/O over gap");
#endif
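
/*
 * Usage sketch (assuming the module is named "zfs", as in ZFS on Linux):
 * these tunables can be set at load time, e.g.
 *
 *	modprobe zfs zfs_vdev_max_pending=16
 *
 * or adjusted on a live system through
 * /sys/module/zfs/parameters/zfs_vdev_max_pending, since each parameter
 * is registered with mode 0644.
 */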