/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>

/*
 * These tunables are for performance analysis.
 */

/*
 * zfs_vdev_max_pending is the maximum number of i/os concurrently
 * pending to each device.  zfs_vdev_min_pending is the initial number
 * of i/os pending to each device (before it starts ramping up to
 * max_pending).
 */
int zfs_vdev_max_pending = 35;
int zfs_vdev_min_pending = 4;
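
/*
 * Illustrative example (not part of the original source): on
 * OpenSolaris-era systems, tunables like these can be set at boot via
 * /etc/system, e.g.
 *
 *      set zfs:zfs_vdev_max_pending = 10
 *
 * to cap the per-device queue depth.
 */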

/* deadline = pri + (lbolt >> time_shift) */
int zfs_vdev_time_shift = 6;
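
/*
 * Worked example: with the default hz of 100, lbolt advances 100 ticks
 * per second, so (lbolt >> 6) increments roughly every 0.64 seconds.
 * I/Os queued within the same ~0.64s window are ordered purely by
 * priority, while any I/O from an earlier window sorts ahead of all
 * later ones, which bounds how long a low-priority I/O can be starved.
 */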

/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;
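
/*
 * Each I/O completion may issue up to zfs_vdev_ramp_rate replacement
 * I/Os (see vdev_queue_io_done() below), so the number of I/Os pending
 * to a device grows geometrically from zfs_vdev_min_pending toward
 * zfs_vdev_max_pending as completions arrive.
 */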

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;
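
/*
 * Example: two 8K reads at offsets 0 and 24K leave a 16K gap, within
 * zfs_vdev_read_gap_limit (32K), so they may be issued as a single 32K
 * read; the done callback copies each caller's slice back out and the
 * gap bytes are simply discarded.  Writes tolerate no gap (maxgap is 0)
 * unless optional I/Os fill the hole.
 */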

/*
 * Virtual device vector for disk I/O scheduling.
 */
static int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_deadline < z2->io_deadline)
                return (-1);
        if (z1->io_deadline > z2->io_deadline)
                return (1);

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

static int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
        const zio_t *z1 = x1;
        const zio_t *z2 = x2;

        if (z1->io_offset < z2->io_offset)
                return (-1);
        if (z1->io_offset > z2->io_offset)
                return (1);

        if (z1 < z2)
                return (-1);
        if (z1 > z2)
                return (1);

        return (0);
}

void
vdev_queue_init(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

        avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
            sizeof (zio_t), offsetof(struct zio, io_deadline_node));

        avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));

        avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
            sizeof (zio_t), offsetof(struct zio, io_offset_node));
}
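
/*
 * Note on the four trees: vq_deadline_tree orders queued I/Os by
 * (deadline, offset) and drives dispatch order; vq_read_tree and
 * vq_write_tree order the same I/Os by offset so adjacent I/Os can be
 * found for aggregation; vq_pending_tree tracks I/Os already issued to
 * the device.
 */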

void
vdev_queue_fini(vdev_t *vd)
{
        vdev_queue_t *vq = &vd->vdev_queue;

        avl_destroy(&vq->vq_deadline_tree);
        avl_destroy(&vq->vq_read_tree);
        avl_destroy(&vq->vq_write_tree);
        avl_destroy(&vq->vq_pending_tree);

        mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
        avl_add(&vq->vq_deadline_tree, zio);
        avl_add(zio->io_vdev_tree, zio);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
        avl_remove(&vq->vq_deadline_tree, zio);
        avl_remove(zio->io_vdev_tree, zio);
}
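
/*
 * Done callback for an aggregated I/O: for reads, copy each parent's
 * slice out of the shared aggregate buffer; writes were copied in when
 * the aggregate was built, so only the buffer needs to be freed.
 */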
static void
vdev_queue_agg_io_done(zio_t *aio)
{
        zio_t *pio;

        while ((pio = zio_walk_parents(aio)) != NULL)
                if (aio->io_type == ZIO_TYPE_READ)
                        bcopy((char *)aio->io_data + (pio->io_offset -
                            aio->io_offset), pio->io_data, pio->io_size);

        zio_buf_free(aio->io_data, aio->io_size);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
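
/*
 * Worked example: with fio = {offset 0, size 4K} and lio = {offset 8K,
 * size 4K}, IO_SPAN(fio, lio) = 8K + 4K - 0 = 12K, and IO_GAP(fio, lio)
 * = -(0 + 4K - 8K) = 4K, the distance between the end of fio and the
 * start of lio.
 */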

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
        zio_t *fio, *lio, *aio, *dio, *nio, *mio;
        avl_tree_t *t;
        int flags;
        uint64_t maxspan = zfs_vdev_aggregation_limit;
        uint64_t maxgap;
        int stretch;

again:
        ASSERT(MUTEX_HELD(&vq->vq_lock));

        if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
            avl_numnodes(&vq->vq_deadline_tree) == 0)
                return (NULL);

        fio = lio = avl_first(&vq->vq_deadline_tree);

        t = fio->io_vdev_tree;
        flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
        maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

        if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
                /*
                 * We can aggregate I/Os that are sufficiently adjacent and of
                 * the same flavor, as expressed by the AGG_INHERIT flags.
                 * The latter requirement is necessary so that certain
                 * attributes of the I/O, such as whether it's a normal I/O
                 * or a scrub/resilver, can be preserved in the aggregate.
                 * We can include optional I/Os, but don't allow them
                 * to begin a range as they add no benefit in that situation.
                 */

                /*
                 * We keep track of the last non-optional I/O.
                 */
                mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

                /*
                 * Walk backwards through sufficiently contiguous I/Os
                 * recording the last non-optional I/O.
                 */
                while ((dio = AVL_PREV(t, fio)) != NULL &&
                    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
                    IO_SPAN(dio, lio) <= maxspan &&
                    IO_GAP(dio, fio) <= maxgap) {
                        fio = dio;
                        if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
                                mio = fio;
                }

                /*
                 * Skip any initial optional I/Os.
                 */
                while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
                        fio = AVL_NEXT(t, fio);
                        ASSERT(fio != NULL);
                }

                /*
                 * Walk forward through sufficiently contiguous I/Os.
                 */
                while ((dio = AVL_NEXT(t, lio)) != NULL &&
                    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
                    IO_SPAN(fio, dio) <= maxspan &&
                    IO_GAP(lio, dio) <= maxgap) {
                        lio = dio;
                        if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
                                mio = lio;
                }
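
                /*
                 * At this point [fio, lio] is a run of same-flavor I/Os
                 * whose total span is at most maxspan and whose internal
                 * gaps are each at most maxgap, with mio pointing at the
                 * last non-optional member (or NULL if all are optional).
                 */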

                /*
                 * Now that we've established the range of the I/O aggregation
                 * we must decide what to do with trailing optional I/Os.
                 * For reads, there's nothing to do. While we are unable to
                 * aggregate further, it's possible that a trailing optional
                 * I/O would allow the underlying device to aggregate with
                 * subsequent I/Os. We must therefore determine if the next
                 * non-optional I/O is close enough to make aggregation
                 * worthwhile.
                 */
                stretch = B_FALSE;
                if (t != &vq->vq_read_tree && mio != NULL) {
                        nio = mio;
                        while ((dio = AVL_NEXT(t, nio)) != NULL &&
                            IO_GAP(nio, dio) == 0 &&
                            IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
                                nio = dio;
                                if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
                                        stretch = B_TRUE;
                                        break;
                                }
                        }
                }

                if (stretch) {
                        /* This may be a no-op. */
                        VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
                        dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
                } else {
                        while (lio != mio && lio != fio) {
                                ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
                                lio = AVL_PREV(t, lio);
                                ASSERT(lio != NULL);
                        }
                }
        }
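
        /*
         * Two outcomes above: when stretching, trailing optional I/Os stay
         * in the range and the I/O just past lio is promoted from optional
         * to mandatory, so the padding carries through toward the next real
         * write; otherwise trailing optional I/Os are trimmed back to the
         * last non-optional one.
         */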

        if (fio != lio) {
                uint64_t size = IO_SPAN(fio, lio);
                ASSERT(size <= zfs_vdev_aggregation_limit);

                aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
                    zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_NOW,
                    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
                    vdev_queue_agg_io_done, NULL);
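
                /*
                 * A single buffer of size bytes covers the whole span:
                 * write data is copied in below (or zeroed for NODATA
                 * holes), and read data is copied back out to each parent
                 * in vdev_queue_agg_io_done().
                 */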

                nio = fio;
                do {
                        dio = nio;
                        nio = AVL_NEXT(t, dio);
                        ASSERT(dio->io_type == aio->io_type);
                        ASSERT(dio->io_vdev_tree == t);

                        if (dio->io_flags & ZIO_FLAG_NODATA) {
                                ASSERT(dio->io_type == ZIO_TYPE_WRITE);
                                bzero((char *)aio->io_data + (dio->io_offset -
                                    aio->io_offset), dio->io_size);
                        } else if (dio->io_type == ZIO_TYPE_WRITE) {
                                bcopy(dio->io_data, (char *)aio->io_data +
                                    (dio->io_offset - aio->io_offset),
                                    dio->io_size);
                        }

                        zio_add_child(dio, aio);
                        vdev_queue_io_remove(vq, dio);
                        zio_vdev_io_bypass(dio);
                        zio_execute(dio);
                } while (dio != lio);

                avl_add(&vq->vq_pending_tree, aio);

                return (aio);
        }

        ASSERT(fio->io_vdev_tree == t);
        vdev_queue_io_remove(vq, fio);

        /*
         * If the I/O is or was optional and therefore has no data, we need to
         * simply discard it. We need to drop the vdev queue's lock to avoid a
         * deadlock that we could encounter since this I/O will complete
         * immediately.
         */
        if (fio->io_flags & ZIO_FLAG_NODATA) {
                mutex_exit(&vq->vq_lock);
                zio_vdev_io_bypass(fio);
                zio_execute(fio);
                mutex_enter(&vq->vq_lock);
                goto again;
        }

        avl_add(&vq->vq_pending_tree, fio);

        return (fio);
}
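
/*
 * Queue entry point from the zio pipeline: stamp a deadline, insert the
 * I/O into the per-vdev trees, and opportunistically issue work while the
 * device is below zfs_vdev_min_pending.  Returns the zio the caller should
 * issue, or NULL if the I/O was queued or handed off as an aggregate.
 */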
zio_t *
vdev_queue_io(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        zio_t *nio;

        ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

        if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
                return (zio);

        zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

        if (zio->io_type == ZIO_TYPE_READ)
                zio->io_vdev_tree = &vq->vq_read_tree;
        else
                zio->io_vdev_tree = &vq->vq_write_tree;

        mutex_enter(&vq->vq_lock);

        zio->io_deadline = (lbolt64 >> zfs_vdev_time_shift) + zio->io_priority;

        vdev_queue_io_add(vq, zio);

        nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

        mutex_exit(&vq->vq_lock);

        if (nio == NULL)
                return (NULL);

        if (nio->io_done == vdev_queue_agg_io_done) {
                zio_nowait(nio);
                return (NULL);
        }

        return (nio);
}
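
/*
 * Completion hook: remove the finished I/O from the pending tree, then
 * use the freed slot(s) to issue up to zfs_vdev_ramp_rate more I/Os at
 * the full zfs_vdev_max_pending limit, dropping the queue lock around
 * each issue.
 */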
void
vdev_queue_io_done(zio_t *zio)
{
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;

        mutex_enter(&vq->vq_lock);

        avl_remove(&vq->vq_pending_tree, zio);

        for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
                zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
                if (nio == NULL)
                        break;
                mutex_exit(&vq->vq_lock);
                if (nio->io_done == vdev_queue_agg_io_done) {
                        zio_nowait(nio);
                } else {
                        zio_vdev_io_reissue(nio);
                        zio_execute(nio);
                }
                mutex_enter(&vq->vq_lock);
        }

        mutex_exit(&vq->vq_lock);
}