/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "@(#)vdev_cache.c 1.7 08/01/10 SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache. When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result. In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically. In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth. A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region. Currently, only
 * metadata I/O is inflated. A further enhancement could take advantage of
 * more semantic information about the I/O. And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate. This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill. When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read. Read data from the cache.
 *
 * (4) Write. Update cache contents after write completion.
 *
 * (5) Evict. When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */
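
/*
 * The five operations above map onto the functions below:
 * vdev_cache_allocate(), vdev_cache_fill(), vdev_cache_read(),
 * vdev_cache_write() and vdev_cache_evict(). vdev_cache_read() is the
 * entry point for the read path: it either copies data out of an existing
 * entry (a hit), chains the zio onto an in-flight fill (a delegation), or
 * allocates a new entry and issues the larger fill I/O itself (a miss).
 */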

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer). At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 */
int zfs_vdev_cache_max = 1<<14;                 /* 16KB */
int zfs_vdev_cache_size = 10ULL << 20;          /* 10MB */
int zfs_vdev_cache_bshift = 16;

#define VCBS (1 << zfs_vdev_cache_bshift)       /* 64KB */
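
/*
 * For illustration, with the default values above: VCBS = 1 << 16 = 64KB,
 * so each cache entry buffers one 64KB-aligned region, and a 10MB
 * zfs_vdev_cache_size allows roughly 10MB / 64KB = 160 entries per vdev
 * before the LRU eviction in vdev_cache_allocate() kicks in. Reads of up
 * to zfs_vdev_cache_max (16KB) are the ones eligible to be inflated into
 * a full 64KB fill.
 */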

kstat_t *vdc_ksp = NULL;

typedef struct vdc_stats {
        kstat_named_t vdc_stat_delegations;
        kstat_named_t vdc_stat_hits;
        kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
        { "delegations",        KSTAT_DATA_UINT64 },
        { "hits",               KSTAT_DATA_UINT64 },
        { "misses",             KSTAT_DATA_UINT64 }
};

#define VDCSTAT_BUMP(stat) atomic_add_64(&vdc_stats.stat.value.ui64, 1);
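
/*
 * Note: these counters are exported through the kstat framework (see
 * vdev_cache_stat_init() below) and can be observed from userland with,
 * for example, `kstat -m zfs -n vdev_cache_stats` on Solaris-derived
 * systems.
 */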

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
        const vdev_cache_entry_t *ve1 = a1;
        const vdev_cache_entry_t *ve2 = a2;
        if (ve1->ve_offset < ve2->ve_offset)
                return (-1);
        if (ve1->ve_offset > ve2->ve_offset)
                return (1);
        return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
        const vdev_cache_entry_t *ve1 = a1;
        const vdev_cache_entry_t *ve2 = a2;
        if (ve1->ve_lastused < ve2->ve_lastused)
                return (-1);
        if (ve1->ve_lastused > ve2->ve_lastused)
                return (1);

        /*
         * Among equally old entries, sort by offset to ensure uniqueness.
         */
        return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
        ASSERT(MUTEX_HELD(&vc->vc_lock));
        ASSERT(ve->ve_fill_io == NULL);
        ASSERT(ve->ve_data != NULL);

        dprintf("evicting %p, off %llx, LRU %llu, age %lu, hits %u, stale %u\n",
            vc, ve->ve_offset, ve->ve_lastused, lbolt - ve->ve_lastused,
            ve->ve_hits, ve->ve_missed_update);

        avl_remove(&vc->vc_lastused_tree, ve);
        avl_remove(&vc->vc_offset_tree, ve);
        zio_buf_free(ve->ve_data, VCBS);
        kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache. At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
        vdev_cache_t *vc = &zio->io_vd->vdev_cache;
        uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
        vdev_cache_entry_t *ve;

        ASSERT(MUTEX_HELD(&vc->vc_lock));

        if (zfs_vdev_cache_size == 0)
                return (NULL);

        /*
         * If adding a new entry would exceed the cache size,
         * evict the oldest entry (LRU).
         */
        if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
            zfs_vdev_cache_size) {
                ve = avl_first(&vc->vc_lastused_tree);
                if (ve->ve_fill_io != NULL) {
                        dprintf("can't evict in %p, still filling\n", vc);
                        return (NULL);
                }
                ASSERT(ve->ve_hits != 0);
                vdev_cache_evict(vc, ve);
        }

        ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
        ve->ve_offset = offset;
        ve->ve_lastused = lbolt;
        ve->ve_data = zio_buf_alloc(VCBS);

        avl_add(&vc->vc_offset_tree, ve);
        avl_add(&vc->vc_lastused_tree, ve);

        return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
        uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

        ASSERT(MUTEX_HELD(&vc->vc_lock));
        ASSERT(ve->ve_fill_io == NULL);

        if (ve->ve_lastused != lbolt) {
                avl_remove(&vc->vc_lastused_tree, ve);
                ve->ve_lastused = lbolt;
                avl_add(&vc->vc_lastused_tree, ve);
        }

        ve->ve_hits++;
        bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_cache_t *vc = &vd->vdev_cache;
        vdev_cache_entry_t *ve = zio->io_private;
        zio_t *dio;

        ASSERT(zio->io_size == VCBS);

        /*
         * Add data to the cache.
         */
        mutex_enter(&vc->vc_lock);

        ASSERT(ve->ve_fill_io == zio);
        ASSERT(ve->ve_offset == zio->io_offset);
        ASSERT(ve->ve_data == zio->io_data);

        ve->ve_fill_io = NULL;

        /*
         * Even if this cache line was invalidated by a missed write update,
         * any reads that were queued up before the missed update are still
         * valid, so we can satisfy them from this line before we evict it.
         */
        for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next)
                vdev_cache_hit(vc, ve, dio);

        if (zio->io_error || ve->ve_missed_update)
                vdev_cache_evict(vc, ve);

        mutex_exit(&vc->vc_lock);

        while ((dio = zio->io_delegate_list) != NULL) {
                zio->io_delegate_list = dio->io_delegate_next;
                dio->io_delegate_next = NULL;
                dio->io_error = zio->io_error;
                zio_next_stage(dio);
        }
}

/*
 * Read data from the cache. Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
        vdev_cache_t *vc = &zio->io_vd->vdev_cache;
        vdev_cache_entry_t *ve, ve_search;
        uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
        uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
        zio_t *fio;

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
                return (EINVAL);

        if (zio->io_size > zfs_vdev_cache_max)
                return (EOVERFLOW);

        /*
         * If the I/O straddles two or more cache blocks, don't cache it.
         */
        if (P2CROSS(zio->io_offset, zio->io_offset + zio->io_size - 1, VCBS))
                return (EXDEV);

        ASSERT(cache_phase + zio->io_size <= VCBS);

        mutex_enter(&vc->vc_lock);

        ve_search.ve_offset = cache_offset;
        ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

        if (ve != NULL) {
                if (ve->ve_missed_update) {
                        mutex_exit(&vc->vc_lock);
                        return (ESTALE);
                }

                if ((fio = ve->ve_fill_io) != NULL) {
                        zio->io_delegate_next = fio->io_delegate_list;
                        fio->io_delegate_list = zio;
                        zio_vdev_io_bypass(zio);
                        mutex_exit(&vc->vc_lock);
                        VDCSTAT_BUMP(vdc_stat_delegations);
                        return (0);
                }

                vdev_cache_hit(vc, ve, zio);
                zio_vdev_io_bypass(zio);

                mutex_exit(&vc->vc_lock);
                VDCSTAT_BUMP(vdc_stat_hits);
                return (0);
        }

        ve = vdev_cache_allocate(zio);
        if (ve == NULL) {
                mutex_exit(&vc->vc_lock);
                return (ENOMEM);
        }

        fio = zio_vdev_child_io(zio, NULL, zio->io_vd, cache_offset,
            ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
            ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
            ZIO_FLAG_DONT_RETRY | ZIO_FLAG_NOBOOKMARK,
            vdev_cache_fill, ve);

        ve->ve_fill_io = fio;
        fio->io_delegate_list = zio;
        zio_vdev_io_bypass(zio);

        mutex_exit(&vc->vc_lock);
        zio_nowait(fio);
        VDCSTAT_BUMP(vdc_stat_misses);
        return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
        vdev_cache_t *vc = &zio->io_vd->vdev_cache;
        vdev_cache_entry_t *ve, ve_search;
        uint64_t io_start = zio->io_offset;
        uint64_t io_end = io_start + zio->io_size;
        uint64_t min_offset = P2ALIGN(io_start, VCBS);
        uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
        avl_index_t where;

        ASSERT(zio->io_type == ZIO_TYPE_WRITE);

        mutex_enter(&vc->vc_lock);

        ve_search.ve_offset = min_offset;
        ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

        if (ve == NULL)
                ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

        while (ve != NULL && ve->ve_offset < max_offset) {
                uint64_t start = MAX(ve->ve_offset, io_start);
                uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

                if (ve->ve_fill_io != NULL) {
                        ve->ve_missed_update = 1;
                } else {
                        bcopy((char *)zio->io_data + start - io_start,
                            ve->ve_data + start - ve->ve_offset, end - start);
                }
                ve = AVL_NEXT(&vc->vc_offset_tree, ve);
        }
        mutex_exit(&vc->vc_lock);
}

void
vdev_cache_purge(vdev_t *vd)
{
        vdev_cache_t *vc = &vd->vdev_cache;
        vdev_cache_entry_t *ve;

        mutex_enter(&vc->vc_lock);
        while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
                vdev_cache_evict(vc, ve);
        mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
        vdev_cache_t *vc = &vd->vdev_cache;

        mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

        avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
            sizeof (vdev_cache_entry_t),
            offsetof(struct vdev_cache_entry, ve_offset_node));

        avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
            sizeof (vdev_cache_entry_t),
            offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
        vdev_cache_t *vc = &vd->vdev_cache;

        vdev_cache_purge(vd);

        avl_destroy(&vc->vc_offset_tree);
        avl_destroy(&vc->vc_lastused_tree);

        mutex_destroy(&vc->vc_lock);
}

void
vdev_cache_stat_init(void)
{
        vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
            KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);
        if (vdc_ksp != NULL) {
                vdc_ksp->ks_data = &vdc_stats;
                kstat_install(vdc_ksp);
        }
}

void
vdev_cache_stat_fini(void)
{
        if (vdc_ksp != NULL) {
                kstat_delete(vdc_ksp);
                vdc_ksp = NULL;
        }
}