 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)arc.c	1.44	08/03/20 SMI"
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache from growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
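
/*
 * Illustrative sketch only (not part of the original source): because
 * blocks vary in size, making room for a miss means walking the tail
 * of an eviction list and accumulating evictable blocks until their
 * combined size covers the request, roughly:
 *
 *    int64_t freed = 0;
 *    for (ab = list_tail(list); ab != NULL && freed < bytes;
 *        ab = ab_prev) {
 *        ab_prev = list_prev(list, ab);
 *        if (refcount_count(&ab->b_refcnt) == 0)
 *            freed += ab->b_size;    (evict ab here)
 *    }
 *
 * The real logic, including buffer recycling and lock contention,
 * lives in arc_evict() below.
 */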
/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 */
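
/*
 * A minimal sketch (not in the original source) of the lookup pattern
 * implied above: buf_hash_find() returns with the hash mutex held, and
 * the caller drops it when done with the header.
 *
 *    kmutex_t *hash_lock;
 *    arc_buf_hdr_t *hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *    if (hdr != NULL) {
 *        ... use hdr, protected by hash_lock ...
 *        mutex_exit(hash_lock);
 *    }
 *
 * From under an arc list lock, a hash lock may only be taken with
 * mutex_tryenter(), per the ordering rule above.
 */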
/*
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 */
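
/*
 * Hypothetical consumer sketch (not in the original source): a user of
 * the ARC registers an eviction function and private data on a buffer,
 * and must tolerate the data being gone by the time it runs:
 *
 *    static int
 *    my_evict_func(arc_buf_t *buf)
 *    {
 *        my_state_t *priv = buf->b_private;    (hypothetical type)
 *
 *        ... drop our bookkeeping that pointed at buf ...
 *        return (0);
 *    }
 *
 * Since arc_buf_evict() and arc_do_user_evicts() may both fire, priv
 * must be guarded by the consumer's own lock.
 */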
/*
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *    - L2ARC buflist creation
 *    - L2ARC buflist eviction
 *    - L2ARC write completion, which walks L2ARC buflists
 *    - ARC header destruction, as it removes from L2ARC buflists
 *    - ARC header release, as it removes from L2ARC buflists
 */
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/vmsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#include <sys/callb.h>
#include <sys/kstat.h>
static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern uint64_t zfs_write_limit_inflated;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
typedef enum arc_reclaim_strategy {
    ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
    ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;
/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
/*
 * Note that buffers can be in one of 6 states:
 *    ARC_anon	- anonymous (discussed below)
 *    ARC_mru	- recently used, currently cached
 *    ARC_mru_ghost	- recently used, no longer in cache
 *    ARC_mfu	- frequently used, currently cached
 *    ARC_mfu_ghost	- frequently used, no longer in cache
 *    ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they
 * are linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
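
/*
 * Illustrative lifecycle (a sketch, not part of the original source):
 *
 *    anon --(write, DVA assigned)--> mru --(second access)--> mfu
 *    mru --(evicted)--> mru_ghost        mfu --(evicted)--> mfu_ghost
 *
 * arc_access(), below, implements these transitions.
 */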
typedef struct arc_state {
    list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
    uint64_t	arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
    uint64_t	arcs_size;	/* total amount of data in this state */
    kmutex_t	arcs_mtx;
} arc_state_t;
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
    kstat_named_t	arcstat_hits;
    kstat_named_t	arcstat_misses;
    kstat_named_t	arcstat_demand_data_hits;
    kstat_named_t	arcstat_demand_data_misses;
    kstat_named_t	arcstat_demand_metadata_hits;
    kstat_named_t	arcstat_demand_metadata_misses;
    kstat_named_t	arcstat_prefetch_data_hits;
    kstat_named_t	arcstat_prefetch_data_misses;
    kstat_named_t	arcstat_prefetch_metadata_hits;
    kstat_named_t	arcstat_prefetch_metadata_misses;
    kstat_named_t	arcstat_mru_hits;
    kstat_named_t	arcstat_mru_ghost_hits;
    kstat_named_t	arcstat_mfu_hits;
    kstat_named_t	arcstat_mfu_ghost_hits;
    kstat_named_t	arcstat_deleted;
    kstat_named_t	arcstat_recycle_miss;
    kstat_named_t	arcstat_mutex_miss;
    kstat_named_t	arcstat_evict_skip;
    kstat_named_t	arcstat_hash_elements;
    kstat_named_t	arcstat_hash_elements_max;
    kstat_named_t	arcstat_hash_collisions;
    kstat_named_t	arcstat_hash_chains;
    kstat_named_t	arcstat_hash_chain_max;
    kstat_named_t	arcstat_p;
    kstat_named_t	arcstat_c;
    kstat_named_t	arcstat_c_min;
    kstat_named_t	arcstat_c_max;
    kstat_named_t	arcstat_size;
    kstat_named_t	arcstat_hdr_size;
    kstat_named_t	arcstat_l2_hits;
    kstat_named_t	arcstat_l2_misses;
    kstat_named_t	arcstat_l2_feeds;
    kstat_named_t	arcstat_l2_rw_clash;
    kstat_named_t	arcstat_l2_writes_sent;
    kstat_named_t	arcstat_l2_writes_done;
    kstat_named_t	arcstat_l2_writes_error;
    kstat_named_t	arcstat_l2_writes_hdr_miss;
    kstat_named_t	arcstat_l2_evict_lock_retry;
    kstat_named_t	arcstat_l2_evict_reading;
    kstat_named_t	arcstat_l2_free_on_write;
    kstat_named_t	arcstat_l2_abort_lowmem;
    kstat_named_t	arcstat_l2_cksum_bad;
    kstat_named_t	arcstat_l2_io_error;
    kstat_named_t	arcstat_l2_size;
    kstat_named_t	arcstat_l2_hdr_size;
    kstat_named_t	arcstat_memory_throttle_count;
} arc_stats_t;
static arc_stats_t arc_stats = {
    { "hits",			KSTAT_DATA_UINT64 },
    { "misses",			KSTAT_DATA_UINT64 },
    { "demand_data_hits",	KSTAT_DATA_UINT64 },
    { "demand_data_misses",	KSTAT_DATA_UINT64 },
    { "demand_metadata_hits",	KSTAT_DATA_UINT64 },
    { "demand_metadata_misses",	KSTAT_DATA_UINT64 },
    { "prefetch_data_hits",	KSTAT_DATA_UINT64 },
    { "prefetch_data_misses",	KSTAT_DATA_UINT64 },
    { "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
    { "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
    { "mru_hits",		KSTAT_DATA_UINT64 },
    { "mru_ghost_hits",		KSTAT_DATA_UINT64 },
    { "mfu_hits",		KSTAT_DATA_UINT64 },
    { "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
    { "deleted",		KSTAT_DATA_UINT64 },
    { "recycle_miss",		KSTAT_DATA_UINT64 },
    { "mutex_miss",		KSTAT_DATA_UINT64 },
    { "evict_skip",		KSTAT_DATA_UINT64 },
    { "hash_elements",		KSTAT_DATA_UINT64 },
    { "hash_elements_max",	KSTAT_DATA_UINT64 },
    { "hash_collisions",	KSTAT_DATA_UINT64 },
    { "hash_chains",		KSTAT_DATA_UINT64 },
    { "hash_chain_max",		KSTAT_DATA_UINT64 },
    { "p",			KSTAT_DATA_UINT64 },
    { "c",			KSTAT_DATA_UINT64 },
    { "c_min",			KSTAT_DATA_UINT64 },
    { "c_max",			KSTAT_DATA_UINT64 },
    { "size",			KSTAT_DATA_UINT64 },
    { "hdr_size",		KSTAT_DATA_UINT64 },
    { "l2_hits",		KSTAT_DATA_UINT64 },
    { "l2_misses",		KSTAT_DATA_UINT64 },
    { "l2_feeds",		KSTAT_DATA_UINT64 },
    { "l2_rw_clash",		KSTAT_DATA_UINT64 },
    { "l2_writes_sent",		KSTAT_DATA_UINT64 },
    { "l2_writes_done",		KSTAT_DATA_UINT64 },
    { "l2_writes_error",	KSTAT_DATA_UINT64 },
    { "l2_writes_hdr_miss",	KSTAT_DATA_UINT64 },
    { "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
    { "l2_evict_reading",	KSTAT_DATA_UINT64 },
    { "l2_free_on_write",	KSTAT_DATA_UINT64 },
    { "l2_abort_lowmem",	KSTAT_DATA_UINT64 },
    { "l2_cksum_bad",		KSTAT_DATA_UINT64 },
    { "l2_io_error",		KSTAT_DATA_UINT64 },
    { "l2_size",		KSTAT_DATA_UINT64 },
    { "l2_hdr_size",		KSTAT_DATA_UINT64 },
    { "memory_throttle_count",	KSTAT_DATA_UINT64 }
};
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
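
/*
 * Example usage (illustrative, not in the original source):
 *
 *    ARCSTAT_INCR(arcstat_size, size);	(add size bytes to the kstat)
 *    ARCSTAT_BUMP(arcstat_hits);		(count one cache hit)
 *    ARCSTAT_MAXSTAT(arcstat_hash_elements);	(ratchet the _max kstat)
 *
 * All of these compile down to lock-free atomics on the kstat values.
 */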
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
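
/*
 * Example (this exact pattern appears in arc_buf_add_ref() below):
 * classify a hit as demand vs. prefetch and data vs. metadata in one
 * statement, bumping one of the four arcstat_*_hits counters.
 *
 *    ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *        demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *        data, metadata, hits);
 */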
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;
/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
    void			*acb_private;
    arc_done_func_t		*acb_done;
    arc_byteswap_func_t		*acb_byteswap;
    arc_buf_t			*acb_buf;
    zio_t			*acb_zio_dummy;
    arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
    void		*awcb_private;
    arc_done_func_t	*awcb_ready;
    arc_done_func_t	*awcb_done;
    arc_buf_t		*awcb_buf;
};
struct arc_buf_hdr {
    /* protected by hash lock */
    dva_t			b_dva;
    uint64_t			b_birth;
    uint64_t			b_cksum0;

    kmutex_t			b_freeze_lock;
    zio_cksum_t			*b_freeze_cksum;

    arc_buf_hdr_t		*b_hash_next;
    arc_buf_t			*b_buf;
    uint32_t			b_flags;
    uint32_t			b_datacnt;

    arc_callback_t		*b_acb;
    kcondvar_t			b_cv;

    /* immutable */
    arc_buf_contents_t		b_type;
    uint64_t			b_size;
    spa_t			*b_spa;

    /* protected by arc state mutex */
    arc_state_t			*b_state;
    list_node_t			b_arc_node;

    /* updated atomically */
    clock_t			b_arc_access;

    /* self protecting */
    refcount_t			b_refcnt;

    l2arc_buf_hdr_t		*b_l2hdr;
    list_node_t			b_l2node;
};
static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */
#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_DONT_L2CACHE	(1 << 16)	/* originated by prefetch */
#define	ARC_L2_READING		(1 << 17)	/* L2ARC read in progress */
#define	ARC_L2_WRITING		(1 << 18)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 19)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 20)	/* head of write list */
#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_DONT_L2CACHE(hdr)	((hdr)->b_flags & ARC_DONT_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_L2_READING)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
    kmutex_t	ht_lock;
    unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
    uint64_t ht_mask;
    arc_buf_hdr_t **ht_table;
    struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;
#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
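
/*
 * Sketch (illustrative, not in the original source): a block's hash
 * lock is found by hashing its identity to a table index, then folding
 * the index into one of the BUF_LOCKS lock stripes:
 *
 *    uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 *    kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 *    mutex_enter(hash_lock);
 *
 * buf_hash_find(), below, uses exactly this sequence.
 */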
uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_DELAY	180		/* starting grace */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
typedef struct l2arc_dev {
    vdev_t		*l2ad_vdev;	/* vdev */
    spa_t		*l2ad_spa;	/* spa */
    uint64_t		l2ad_hand;	/* next write location */
    uint64_t		l2ad_write;	/* desired write size, bytes */
    uint64_t		l2ad_start;	/* first addr on device */
    uint64_t		l2ad_end;	/* last addr on device */
    uint64_t		l2ad_evict;	/* last addr eviction reached */
    boolean_t		l2ad_first;	/* first sweep through */
    list_t		*l2ad_buflist;	/* buffer list */
    list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;
static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
    arc_buf_t	*l2rcb_buf;		/* read buffer */
    spa_t	*l2rcb_spa;		/* spa */
    blkptr_t	l2rcb_bp;		/* original blkptr */
    zbookmark_t	l2rcb_zb;		/* original bookmark */
    int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
    l2arc_dev_t	*l2wcb_dev;		/* device info */
    arc_buf_hdr_t	*l2wcb_head;	/* head of write buflist */
} l2arc_write_callback_t;
struct l2arc_buf_hdr {
    /* protected by arc_buf_hdr mutex */
    l2arc_dev_t	*b_dev;			/* L2ARC device */
    daddr_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
    /* protected by l2arc_free_on_write_mtx */
    void	*l2df_data;
    size_t	l2df_size;
    void	(*l2df_func)(void *, size_t);
    list_node_t	l2df_list_node;
} l2arc_data_free_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);
static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
    uintptr_t spav = (uintptr_t)spa;
    uint8_t *vdva = (uint8_t *)dva;
    uint64_t crc = -1ULL;
    int i;

    ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

    for (i = 0; i < sizeof (dva_t); i++)
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

    crc ^= (spav>>8) ^ birth;

    return (crc);
}
#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
    uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
    kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
    arc_buf_hdr_t *buf;

    mutex_enter(hash_lock);
    for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
        buf = buf->b_hash_next) {
        if (BUF_EQUAL(spa, dva, birth, buf)) {
            *lockp = hash_lock;
            return (buf);
        }
    }
    mutex_exit(hash_lock);
    *lockp = NULL;
    return (NULL);
}
/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
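/*
 * Illustrative caller pattern (a sketch; arc_read() in the full source
 * does something similar): insert a new header unless an equal one
 * already exists.
 *
 *    arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *    if (exists != NULL) {
 *        ... someone beat us to it; use exists instead of hdr ...
 *    }
 *    mutex_exit(hash_lock);
 */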
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
    uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
    kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
    arc_buf_hdr_t *fbuf;
    uint32_t i;

    ASSERT(!HDR_IN_HASH_TABLE(buf));
    *lockp = hash_lock;
    mutex_enter(hash_lock);
    for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
        fbuf = fbuf->b_hash_next, i++) {
        if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
            return (fbuf);
    }

    buf->b_hash_next = buf_hash_table.ht_table[idx];
    buf_hash_table.ht_table[idx] = buf;
    buf->b_flags |= ARC_IN_HASH_TABLE;

    /* collect some hash table performance data */
    if (i > 0) {
        ARCSTAT_BUMP(arcstat_hash_collisions);
        if (i == 1)
            ARCSTAT_BUMP(arcstat_hash_chains);

        ARCSTAT_MAX(arcstat_hash_chain_max, i);
    }

    ARCSTAT_BUMP(arcstat_hash_elements);
    ARCSTAT_MAXSTAT(arcstat_hash_elements);

    return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
    arc_buf_hdr_t *fbuf, **bufp;
    uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

    ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
    ASSERT(HDR_IN_HASH_TABLE(buf));

    bufp = &buf_hash_table.ht_table[idx];
    while ((fbuf = *bufp) != buf) {
        ASSERT(fbuf != NULL);
        bufp = &fbuf->b_hash_next;
    }
    *bufp = buf->b_hash_next;
    buf->b_hash_next = NULL;
    buf->b_flags &= ~ARC_IN_HASH_TABLE;

    /* collect some hash table performance data */
    ARCSTAT_BUMPDOWN(arcstat_hash_elements);

    if (buf_hash_table.ht_table[idx] &&
        buf_hash_table.ht_table[idx]->b_hash_next == NULL)
        ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
    int i;

    kmem_free(buf_hash_table.ht_table,
        (buf_hash_table.ht_mask + 1) * sizeof (void *));
    for (i = 0; i < BUF_LOCKS; i++)
        mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
    kmem_cache_destroy(hdr_cache);
    kmem_cache_destroy(buf_cache);
}
/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
    arc_buf_hdr_t *buf = vbuf;

    bzero(buf, sizeof (arc_buf_hdr_t));
    refcount_create(&buf->b_refcnt);
    cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
    mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

    ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
    return (0);
}
/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
    arc_buf_hdr_t *buf = vbuf;

    refcount_destroy(&buf->b_refcnt);
    cv_destroy(&buf->b_cv);
    mutex_destroy(&buf->b_freeze_lock);

    ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}
/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
    dprintf("hdr_recl called\n");
    /*
     * umem calls the reclaim func when we destroy the buf cache,
     * which is after we do arc_fini().
     */
    if (!arc_dead)
        cv_signal(&arc_reclaim_thr_cv);
}
static void
buf_init(void)
{
    uint64_t *ct;
    uint64_t hsize = 1ULL << 12;
    int i, j;

    /*
     * The hash table is big enough to fill all of physical memory
     * with an average 64K block size.  The table will take up
     * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
     */
    while (hsize * 65536 < physmem * PAGESIZE)
        hsize <<= 1;
retry:
    buf_hash_table.ht_mask = hsize - 1;
    buf_hash_table.ht_table =
        kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
    if (buf_hash_table.ht_table == NULL) {
        ASSERT(hsize > (1ULL << 8));
        hsize >>= 1;
        goto retry;
    }

    hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
        0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
    buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
        0, NULL, NULL, NULL, NULL, NULL, 0);

    for (i = 0; i < 256; i++)
        for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
            *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

    for (i = 0; i < BUF_LOCKS; i++) {
        mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
            NULL, MUTEX_DEFAULT, NULL);
    }
}
#define	ARC_MINTIME	(hz>>4) /* 62 ms */
static void
arc_cksum_verify(arc_buf_t *buf)
{
    zio_cksum_t zc;

    if (!(zfs_flags & ZFS_DEBUG_MODIFY))
        return;

    mutex_enter(&buf->b_hdr->b_freeze_lock);
    if (buf->b_hdr->b_freeze_cksum == NULL ||
        (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
        mutex_exit(&buf->b_hdr->b_freeze_lock);
        return;
    }
    fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
    if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
        panic("buffer modified while frozen!");
    mutex_exit(&buf->b_hdr->b_freeze_lock);
}
static int
arc_cksum_equal(arc_buf_t *buf)
{
    zio_cksum_t zc;
    int equal;

    mutex_enter(&buf->b_hdr->b_freeze_lock);
    fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
    equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
    mutex_exit(&buf->b_hdr->b_freeze_lock);

    return (equal);
}
static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
    if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
        return;

    mutex_enter(&buf->b_hdr->b_freeze_lock);
    if (buf->b_hdr->b_freeze_cksum != NULL) {
        mutex_exit(&buf->b_hdr->b_freeze_lock);
        return;
    }
    buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
    fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
        buf->b_hdr->b_freeze_cksum);
    mutex_exit(&buf->b_hdr->b_freeze_lock);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
    if (zfs_flags & ZFS_DEBUG_MODIFY) {
        if (buf->b_hdr->b_state != arc_anon)
            panic("modifying non-anon buffer!");
        if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
            panic("modifying buffer while i/o in progress!");
        arc_cksum_verify(buf);
    }

    mutex_enter(&buf->b_hdr->b_freeze_lock);
    if (buf->b_hdr->b_freeze_cksum != NULL) {
        kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
        buf->b_hdr->b_freeze_cksum = NULL;
    }
    mutex_exit(&buf->b_hdr->b_freeze_lock);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
    if (!(zfs_flags & ZFS_DEBUG_MODIFY))
        return;

    ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
        buf->b_hdr->b_state == arc_anon);
    arc_cksum_compute(buf, B_FALSE);
}
static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
    ASSERT(MUTEX_HELD(hash_lock));

    if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
        (ab->b_state != arc_anon)) {
        uint64_t delta = ab->b_size * ab->b_datacnt;
        list_t *list = &ab->b_state->arcs_list[ab->b_type];
        uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

        ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
        mutex_enter(&ab->b_state->arcs_mtx);
        ASSERT(list_link_active(&ab->b_arc_node));
        list_remove(list, ab);
        if (GHOST_STATE(ab->b_state)) {
            ASSERT3U(ab->b_datacnt, ==, 0);
            ASSERT3P(ab->b_buf, ==, NULL);
            delta = ab->b_size;
        }
        ASSERT(delta > 0);
        ASSERT3U(*size, >=, delta);
        atomic_add_64(size, -delta);
        mutex_exit(&ab->b_state->arcs_mtx);
        /* remove the prefetch flag if we get a reference */
        if (ab->b_flags & ARC_PREFETCH)
            ab->b_flags &= ~ARC_PREFETCH;
    }
}
static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
    int cnt;
    arc_state_t *state = ab->b_state;

    ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
    ASSERT(!GHOST_STATE(state));

    if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
        (state != arc_anon)) {
        uint64_t *size = &state->arcs_lsize[ab->b_type];

        ASSERT(!MUTEX_HELD(&state->arcs_mtx));
        mutex_enter(&state->arcs_mtx);
        ASSERT(!list_link_active(&ab->b_arc_node));
        list_insert_head(&state->arcs_list[ab->b_type], ab);
        ASSERT(ab->b_datacnt > 0);
        atomic_add_64(size, ab->b_size * ab->b_datacnt);
        mutex_exit(&state->arcs_mtx);
    }
    return (cnt);
}
/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
    arc_state_t *old_state = ab->b_state;
    int64_t refcnt = refcount_count(&ab->b_refcnt);
    uint64_t from_delta, to_delta;

    ASSERT(MUTEX_HELD(hash_lock));
    ASSERT(new_state != old_state);
    ASSERT(refcnt == 0 || ab->b_datacnt > 0);
    ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

    from_delta = to_delta = ab->b_datacnt * ab->b_size;

    /*
     * If this buffer is evictable, transfer it from the
     * old state list to the new state list.
     */
    if (refcnt == 0) {
        if (old_state != arc_anon) {
            int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
            uint64_t *size = &old_state->arcs_lsize[ab->b_type];

            if (use_mutex)
                mutex_enter(&old_state->arcs_mtx);

            ASSERT(list_link_active(&ab->b_arc_node));
            list_remove(&old_state->arcs_list[ab->b_type], ab);

            /*
             * If prefetching out of the ghost cache,
             * we will have a non-null datacnt.
             */
            if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
                /* ghost elements have a ghost size */
                ASSERT(ab->b_buf == NULL);
                from_delta = ab->b_size;
            }
            ASSERT3U(*size, >=, from_delta);
            atomic_add_64(size, -from_delta);

            if (use_mutex)
                mutex_exit(&old_state->arcs_mtx);
        }
        if (new_state != arc_anon) {
            int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
            uint64_t *size = &new_state->arcs_lsize[ab->b_type];

            if (use_mutex)
                mutex_enter(&new_state->arcs_mtx);

            list_insert_head(&new_state->arcs_list[ab->b_type], ab);

            /* ghost elements have a ghost size */
            if (GHOST_STATE(new_state)) {
                ASSERT(ab->b_datacnt == 0);
                ASSERT(ab->b_buf == NULL);
                to_delta = ab->b_size;
            }
            atomic_add_64(size, to_delta);

            if (use_mutex)
                mutex_exit(&new_state->arcs_mtx);
        }
    }

    ASSERT(!BUF_EMPTY(ab));
    if (new_state == arc_anon) {
        buf_hash_remove(ab);
    }

    /* adjust state sizes */
    if (to_delta)
        atomic_add_64(&new_state->arcs_size, to_delta);
    if (from_delta) {
        ASSERT3U(old_state->arcs_size, >=, from_delta);
        atomic_add_64(&old_state->arcs_size, -from_delta);
    }
    ab->b_state = new_state;

    /* adjust l2arc hdr stats */
    if (new_state == arc_l2c_only)
        l2arc_hdr_stat_add();
    else if (old_state == arc_l2c_only)
        l2arc_hdr_stat_remove();
}
void
arc_space_consume(uint64_t space)
{
    atomic_add_64(&arc_meta_used, space);
    atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
    ASSERT(arc_meta_used >= space);
    if (arc_meta_max < arc_meta_used)
        arc_meta_max = arc_meta_used;
    atomic_add_64(&arc_meta_used, -space);
    ASSERT(arc_size >= space);
    atomic_add_64(&arc_size, -space);
}
void *
arc_data_buf_alloc(uint64_t size)
{
    if (arc_evict_needed(ARC_BUFC_DATA))
        cv_signal(&arc_reclaim_thr_cv);
    atomic_add_64(&arc_size, size);
    return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
    zio_data_buf_free(buf, size);
    ASSERT(arc_size >= size);
    atomic_add_64(&arc_size, -size);
}
arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
    arc_buf_hdr_t *hdr;
    arc_buf_t *buf;

    ASSERT3U(size, >, 0);
    hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
    ASSERT(BUF_EMPTY(hdr));
    hdr->b_size = size;
    hdr->b_type = type;
    hdr->b_spa = spa;
    hdr->b_state = arc_anon;
    hdr->b_arc_access = 0;
    buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
    buf->b_hdr = hdr;
    buf->b_data = NULL;
    buf->b_efunc = NULL;
    buf->b_private = NULL;
    buf->b_next = NULL;
    hdr->b_buf = buf;
    arc_get_data_buf(buf);
    hdr->b_datacnt = 1;
    hdr->b_flags = 0;
    ASSERT(refcount_is_zero(&hdr->b_refcnt));
    (void) refcount_add(&hdr->b_refcnt, tag);

    return (buf);
}
static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
    arc_buf_t *buf;
    arc_buf_hdr_t *hdr = from->b_hdr;
    uint64_t size = hdr->b_size;

    buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
    buf->b_hdr = hdr;
    buf->b_data = NULL;
    buf->b_efunc = NULL;
    buf->b_private = NULL;
    buf->b_next = hdr->b_buf;
    hdr->b_buf = buf;
    arc_get_data_buf(buf);
    bcopy(from->b_data, buf->b_data, size);
    hdr->b_datacnt += 1;
    return (buf);
}
void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
    arc_buf_hdr_t *hdr;
    kmutex_t *hash_lock;

    /*
     * Check to see if this buffer is currently being evicted via
     * arc_do_user_evicts().
     */
    mutex_enter(&arc_eviction_mtx);
    hdr = buf->b_hdr;
    if (hdr == NULL) {
        mutex_exit(&arc_eviction_mtx);
        return;
    }
    hash_lock = HDR_LOCK(hdr);
    mutex_exit(&arc_eviction_mtx);

    mutex_enter(hash_lock);
    if (buf->b_data == NULL) {
        /*
         * This buffer is evicted.
         */
        mutex_exit(hash_lock);
        return;
    }

    ASSERT(buf->b_hdr == hdr);
    ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
    add_reference(hdr, hash_lock, tag);
    arc_access(hdr, hash_lock);
    mutex_exit(hash_lock);
    ARCSTAT_BUMP(arcstat_hits);
    ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
        demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
        data, metadata, hits);
}
/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
    if (HDR_L2_WRITING(hdr)) {
        l2arc_data_free_t *df;
        df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
        df->l2df_data = data;
        df->l2df_size = size;
        df->l2df_func = free_func;
        mutex_enter(&l2arc_free_on_write_mtx);
        list_insert_head(l2arc_free_on_write, df);
        mutex_exit(&l2arc_free_on_write_mtx);
        ARCSTAT_BUMP(arcstat_l2_free_on_write);
    } else {
        free_func(data, size);
    }
}
static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
    arc_buf_t **bufp;

    /* free up data associated with the buf */
    if (buf->b_data) {
        arc_state_t *state = buf->b_hdr->b_state;
        uint64_t size = buf->b_hdr->b_size;
        arc_buf_contents_t type = buf->b_hdr->b_type;

        arc_cksum_verify(buf);
        if (!recycle) {
            if (type == ARC_BUFC_METADATA) {
                arc_buf_data_free(buf->b_hdr, zio_buf_free,
                    buf->b_data, size);
                arc_space_return(size);
            } else {
                ASSERT(type == ARC_BUFC_DATA);
                arc_buf_data_free(buf->b_hdr,
                    zio_data_buf_free, buf->b_data, size);
                atomic_add_64(&arc_size, -size);
            }
        }
        if (list_link_active(&buf->b_hdr->b_arc_node)) {
            uint64_t *cnt = &state->arcs_lsize[type];

            ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
            ASSERT(state != arc_anon);

            ASSERT3U(*cnt, >=, size);
            atomic_add_64(cnt, -size);
        }
        ASSERT3U(state->arcs_size, >=, size);
        atomic_add_64(&state->arcs_size, -size);
        buf->b_data = NULL;
        ASSERT(buf->b_hdr->b_datacnt > 0);
        buf->b_hdr->b_datacnt -= 1;
    }

    /* only remove the buf if requested */
    if (!all)
        return;

    /* remove the buf from the hdr list */
    for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
        continue;
    *bufp = buf->b_next;

    ASSERT(buf->b_efunc == NULL);

    /* clean up the buf */
    buf->b_hdr = NULL;
    kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
    ASSERT(refcount_is_zero(&hdr->b_refcnt));
    ASSERT3P(hdr->b_state, ==, arc_anon);
    ASSERT(!HDR_IO_IN_PROGRESS(hdr));

    if (hdr->b_l2hdr != NULL) {
        if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
            /*
             * To prevent arc_free() and l2arc_evict() from
             * attempting to free the same buffer at the same time,
             * a FREE_IN_PROGRESS flag is given to arc_free() to
             * give it priority.  l2arc_evict() can't destroy this
             * header while we are waiting on l2arc_buflist_mtx.
             */
            mutex_enter(&l2arc_buflist_mtx);
            ASSERT(hdr->b_l2hdr != NULL);
            list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
            mutex_exit(&l2arc_buflist_mtx);
        } else {
            list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
        }
        ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
        kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
        if (hdr->b_state == arc_l2c_only)
            l2arc_hdr_stat_remove();
        hdr->b_l2hdr = NULL;
    }

    if (!BUF_EMPTY(hdr)) {
        ASSERT(!HDR_IN_HASH_TABLE(hdr));
        bzero(&hdr->b_dva, sizeof (dva_t));
        hdr->b_birth = 0;
        hdr->b_cksum0 = 0;
    }
    while (hdr->b_buf) {
        arc_buf_t *buf = hdr->b_buf;

        if (buf->b_efunc) {
            mutex_enter(&arc_eviction_mtx);
            ASSERT(buf->b_hdr != NULL);
            arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
            hdr->b_buf = buf->b_next;
            buf->b_hdr = &arc_eviction_hdr;
            buf->b_next = arc_eviction_list;
            arc_eviction_list = buf;
            mutex_exit(&arc_eviction_mtx);
        } else {
            arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
        }
    }
    if (hdr->b_freeze_cksum != NULL) {
        kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
        hdr->b_freeze_cksum = NULL;
    }

    ASSERT(!list_link_active(&hdr->b_arc_node));
    ASSERT3P(hdr->b_hash_next, ==, NULL);
    ASSERT3P(hdr->b_acb, ==, NULL);
    kmem_cache_free(hdr_cache, hdr);
}
void
arc_buf_free(arc_buf_t *buf, void *tag)
{
    arc_buf_hdr_t *hdr = buf->b_hdr;
    int hashed = hdr->b_state != arc_anon;

    ASSERT(buf->b_efunc == NULL);
    ASSERT(buf->b_data != NULL);

    if (hashed) {
        kmutex_t *hash_lock = HDR_LOCK(hdr);

        mutex_enter(hash_lock);
        (void) remove_reference(hdr, hash_lock, tag);
        if (hdr->b_datacnt > 1)
            arc_buf_destroy(buf, FALSE, TRUE);
        else
            hdr->b_flags |= ARC_BUF_AVAILABLE;
        mutex_exit(hash_lock);
    } else if (HDR_IO_IN_PROGRESS(hdr)) {
        int destroy_hdr;
        /*
         * We are in the middle of an async write.  Don't destroy
         * this buffer unless the write completes before we finish
         * decrementing the reference count.
         */
        mutex_enter(&arc_eviction_mtx);
        (void) remove_reference(hdr, NULL, tag);
        ASSERT(refcount_is_zero(&hdr->b_refcnt));
        destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
        mutex_exit(&arc_eviction_mtx);
        if (destroy_hdr)
            arc_hdr_destroy(hdr);
    } else {
        if (remove_reference(hdr, NULL, tag) > 0) {
            ASSERT(HDR_IO_ERROR(hdr));
            arc_buf_destroy(buf, FALSE, TRUE);
        } else {
            arc_hdr_destroy(hdr);
        }
    }
}
int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
    arc_buf_hdr_t *hdr = buf->b_hdr;
    kmutex_t *hash_lock = HDR_LOCK(hdr);
    int no_callback = (buf->b_efunc == NULL);

    if (hdr->b_state == arc_anon) {
        arc_buf_free(buf, tag);
        return (no_callback);
    }

    mutex_enter(hash_lock);
    ASSERT(hdr->b_state != arc_anon);
    ASSERT(buf->b_data != NULL);

    (void) remove_reference(hdr, hash_lock, tag);
    if (hdr->b_datacnt > 1) {
        if (no_callback)
            arc_buf_destroy(buf, FALSE, TRUE);
    } else if (no_callback) {
        ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
        hdr->b_flags |= ARC_BUF_AVAILABLE;
    }
    ASSERT(no_callback || hdr->b_datacnt > 1 ||
        refcount_is_zero(&hdr->b_refcnt));
    mutex_exit(hash_lock);
    return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
    return (buf->b_hdr->b_size);
}
/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
    arc_state_t *evicted_state;
    uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
    arc_buf_hdr_t *ab, *ab_prev = NULL;
    list_t *list = &state->arcs_list[type];
    kmutex_t *hash_lock;
    boolean_t have_lock;
    void *stolen = NULL;

    ASSERT(state == arc_mru || state == arc_mfu);

    evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

    mutex_enter(&state->arcs_mtx);
    mutex_enter(&evicted_state->arcs_mtx);

    for (ab = list_tail(list); ab; ab = ab_prev) {
        ab_prev = list_prev(list, ab);
        /* prefetch buffers have a minimum lifespan */
        if (HDR_IO_IN_PROGRESS(ab) ||
            (spa && ab->b_spa != spa) ||
            (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
            lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
            skipped++;
            continue;
        }
        /* "lookahead" for better eviction candidate */
        if (recycle && ab->b_size != bytes &&
            ab_prev && ab_prev->b_size == bytes)
            continue;
        hash_lock = HDR_LOCK(ab);
        have_lock = MUTEX_HELD(hash_lock);
        if (have_lock || mutex_tryenter(hash_lock)) {
            ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
            ASSERT(ab->b_datacnt > 0);
            while (ab->b_datacnt > 0) {
                arc_buf_t *buf = ab->b_buf;
                if (buf->b_data) {
                    bytes_evicted += ab->b_size;
                    if (recycle && ab->b_type == type &&
                        ab->b_size == bytes &&
                        !HDR_L2_WRITING(ab)) {
                        stolen = buf->b_data;
                        recycle = FALSE;
                    }
                }
                if (buf->b_efunc) {
                    mutex_enter(&arc_eviction_mtx);
                    arc_buf_destroy(buf,
                        buf->b_data == stolen, FALSE);
                    ab->b_buf = buf->b_next;
                    buf->b_hdr = &arc_eviction_hdr;
                    buf->b_next = arc_eviction_list;
                    arc_eviction_list = buf;
                    mutex_exit(&arc_eviction_mtx);
                } else {
                    arc_buf_destroy(buf,
                        buf->b_data == stolen, TRUE);
                }
            }
            ASSERT(ab->b_datacnt == 0);
            arc_change_state(evicted_state, ab, hash_lock);
            ASSERT(HDR_IN_HASH_TABLE(ab));
            ab->b_flags |= ARC_IN_HASH_TABLE;
            ab->b_flags &= ~ARC_BUF_AVAILABLE;
            DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
            if (!have_lock)
                mutex_exit(hash_lock);
            if (bytes >= 0 && bytes_evicted >= bytes)
                break;
        } else {
            missed += 1;
        }
    }

    mutex_exit(&evicted_state->arcs_mtx);
    mutex_exit(&state->arcs_mtx);

    if (bytes_evicted < bytes)
        dprintf("only evicted %lld bytes from %x",
            (longlong_t)bytes_evicted, state);

    if (skipped)
        ARCSTAT_INCR(arcstat_evict_skip, skipped);

    if (missed)
        ARCSTAT_INCR(arcstat_mutex_miss, missed);

    /*
     * We have just evicted some data into the ghost state, make
     * sure we also adjust the ghost state size if necessary.
     */
    if (arc_size > arc_c &&
        arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
        int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
            arc_mru_ghost->arcs_size - arc_c;

        if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
            int64_t todelete =
                MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
            arc_evict_ghost(arc_mru_ghost, NULL, todelete);
        } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
            int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
                arc_mru_ghost->arcs_size +
                arc_mfu_ghost->arcs_size - arc_c);
            arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
        }
    }

    return (stolen);
}
/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes)
{
    arc_buf_hdr_t *ab, *ab_prev;
    list_t *list = &state->arcs_list[ARC_BUFC_DATA];
    kmutex_t *hash_lock;
    uint64_t bytes_deleted = 0;
    uint64_t bufs_skipped = 0;

    ASSERT(GHOST_STATE(state));
top:
    mutex_enter(&state->arcs_mtx);
    for (ab = list_tail(list); ab; ab = ab_prev) {
        ab_prev = list_prev(list, ab);
        if (spa && ab->b_spa != spa)
            continue;
        hash_lock = HDR_LOCK(ab);
        if (mutex_tryenter(hash_lock)) {
            ASSERT(!HDR_IO_IN_PROGRESS(ab));
            ASSERT(ab->b_buf == NULL);
            ARCSTAT_BUMP(arcstat_deleted);
            bytes_deleted += ab->b_size;

            if (ab->b_l2hdr != NULL) {
                /*
                 * This buffer is cached on the 2nd Level ARC;
                 * don't destroy the header.
                 */
                arc_change_state(arc_l2c_only, ab, hash_lock);
                mutex_exit(hash_lock);
            } else {
                arc_change_state(arc_anon, ab, hash_lock);
                mutex_exit(hash_lock);
                arc_hdr_destroy(ab);
            }

            DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
            if (bytes >= 0 && bytes_deleted >= bytes)
                break;
        } else {
            if (bytes < 0) {
                mutex_exit(&state->arcs_mtx);
                mutex_enter(hash_lock);
                mutex_exit(hash_lock);
                goto top;
            }
            bufs_skipped += 1;
        }
    }
    mutex_exit(&state->arcs_mtx);

    if (list == &state->arcs_list[ARC_BUFC_DATA] &&
        (bytes < 0 || bytes_deleted < bytes)) {
        list = &state->arcs_list[ARC_BUFC_METADATA];
        goto top;
    }

    if (bufs_skipped)
        ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);

    if (bytes_deleted < bytes)
        dprintf("only deleted %lld bytes from %p",
            (longlong_t)bytes_deleted, state);
}
static void
arc_adjust(void)
{
    int64_t top_sz, mru_over, arc_over, todelete;

    top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used;

    if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
        int64_t toevict =
            MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
        (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA);
        top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
    }

    if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
        int64_t toevict =
            MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
        (void) arc_evict(arc_mru, NULL, toevict, FALSE,
            ARC_BUFC_METADATA);
        top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
    }

    mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

    if (mru_over > 0) {
        if (arc_mru_ghost->arcs_size > 0) {
            todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
            arc_evict_ghost(arc_mru_ghost, NULL, todelete);
        }
    }

    if ((arc_over = arc_size - arc_c) > 0) {
        int64_t tbl_over;

        if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
            int64_t toevict =
                MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
            (void) arc_evict(arc_mfu, NULL, toevict, FALSE,
                ARC_BUFC_DATA);
            arc_over = arc_size - arc_c;
        }

        if (arc_over > 0 &&
            arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
            int64_t toevict =
                MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
                arc_over);
            (void) arc_evict(arc_mfu, NULL, toevict, FALSE,
                ARC_BUFC_METADATA);
        }

        tbl_over = arc_size + arc_mru_ghost->arcs_size +
            arc_mfu_ghost->arcs_size - arc_c * 2;

        if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
            todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
            arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
        }
    }
}
static void
arc_do_user_evicts(void)
{
    mutex_enter(&arc_eviction_mtx);
    while (arc_eviction_list != NULL) {
        arc_buf_t *buf = arc_eviction_list;
        arc_eviction_list = buf->b_next;
        buf->b_hdr = NULL;
        mutex_exit(&arc_eviction_mtx);

        if (buf->b_efunc != NULL)
            VERIFY(buf->b_efunc(buf) == 0);

        buf->b_efunc = NULL;
        buf->b_private = NULL;
        kmem_cache_free(buf_cache, buf);
        mutex_enter(&arc_eviction_mtx);
    }
    mutex_exit(&arc_eviction_mtx);
}
/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
    while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
        (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA);
        if (spa)
            break;
    }
    while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
        (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA);
        if (spa)
            break;
    }
    while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
        (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA);
        if (spa)
            break;
    }
    while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
        (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA);
        if (spa)
            break;
    }

    arc_evict_ghost(arc_mru_ghost, spa, -1);
    arc_evict_ghost(arc_mfu_ghost, spa, -1);

    mutex_enter(&arc_reclaim_thr_lock);
    arc_do_user_evicts();
    mutex_exit(&arc_reclaim_thr_lock);
    ASSERT(spa || arc_eviction_list == NULL);
}
int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
    if (arc_c > arc_c_min) {
        uint64_t to_free;

#ifdef _KERNEL
        to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
#else
        to_free = arc_c >> arc_shrink_shift;
#endif
        if (arc_c > arc_c_min + to_free)
            atomic_add_64(&arc_c, -to_free);
        else
            arc_c = arc_c_min;

        atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
        if (arc_c > arc_size)
            arc_c = MAX(arc_size, arc_c_min);
        if (arc_p > arc_c)
            arc_p = (arc_c >> 1);
        ASSERT(arc_c >= arc_c_min);
        ASSERT((int64_t)arc_p >= 0);
    }

    if (arc_size > arc_c)
        arc_adjust();
}
static int
arc_reclaim_needed(void)
{
    uint64_t extra;

#ifdef _KERNEL

    if (needfree)
        return (1);

    /*
     * take 'desfree' extra pages, so we reclaim sooner, rather than later
     */
    extra = desfree;

    /*
     * check that we're out of range of the pageout scanner.  It starts to
     * schedule paging if freemem is less than lotsfree and needfree.
     * lotsfree is the high-water mark for pageout, and needfree is the
     * number of needed free pages.  We add extra pages here to make sure
     * the scanner doesn't start up while we're freeing memory.
     */
    if (freemem < lotsfree + needfree + extra)
        return (1);

    /*
     * check to make sure that swapfs has enough space so that anon
     * reservations can still succeed. anon_resvmem() checks that the
     * availrmem is greater than swapfs_minfree, and the number of reserved
     * swap pages.  We also add a bit of extra here just to prevent
     * circumstances from getting really dire.
     */
    if (availrmem < swapfs_minfree + swapfs_reserve + extra)
        return (1);

#if defined(__i386)
    /*
     * If we're on an i386 platform, it's possible that we'll exhaust the
     * kernel heap space before we ever run out of available physical
     * memory.  Most checks of the size of the heap_area compare against
     * tune.t_minarmem, which is the minimum available real memory that we
     * can have in the system.  However, this is generally fixed at 25 pages
     * which is so low that it's useless.  In this comparison, we seek to
     * calculate the total heap-size, and reclaim if more than 3/4ths of the
     * heap is allocated.  (Or, in the calculation, if less than 1/4th is
     * free.)
     */
    if (btop(vmem_size(heap_arena, VMEM_FREE)) <
        (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
        return (1);
#endif

#else
    if (spa_get_random(100) == 0)
        return (1);
#endif
    return (0);
}
static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
    size_t		i;
    kmem_cache_t	*prev_cache = NULL;
    kmem_cache_t	*prev_data_cache = NULL;
    extern kmem_cache_t	*zio_buf_cache[];
    extern kmem_cache_t	*zio_data_buf_cache[];

#ifdef _KERNEL
    if (arc_meta_used >= arc_meta_limit) {
        /*
         * We are exceeding our meta-data cache limit.
         * Purge some DNLC entries to release holds on meta-data.
         */
        dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
    }
#endif

    /*
     * Reclaim unused memory from all kmem caches.
     */
    kmem_reap();

    /*
     * An aggressive reclamation will shrink the cache size as well as
     * reap free buffers from the arc kmem caches.
     */
    if (strat == ARC_RECLAIM_AGGR)
        arc_shrink();

    for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
        if (zio_buf_cache[i] != prev_cache) {
            prev_cache = zio_buf_cache[i];
            kmem_cache_reap_now(zio_buf_cache[i]);
        }
        if (zio_data_buf_cache[i] != prev_data_cache) {
            prev_data_cache = zio_data_buf_cache[i];
            kmem_cache_reap_now(zio_data_buf_cache[i]);
        }
    }
    kmem_cache_reap_now(buf_cache);
    kmem_cache_reap_now(hdr_cache);
}
static void
arc_reclaim_thread(void)
{
    clock_t			growtime = 0;
    arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
    callb_cpr_t			cpr;

    CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

    mutex_enter(&arc_reclaim_thr_lock);
    while (arc_thread_exit == 0) {
        if (arc_reclaim_needed()) {

            if (arc_no_grow) {
                if (last_reclaim == ARC_RECLAIM_CONS) {
                    last_reclaim = ARC_RECLAIM_AGGR;
                } else {
                    last_reclaim = ARC_RECLAIM_CONS;
                }
            } else {
                arc_no_grow = TRUE;
                last_reclaim = ARC_RECLAIM_AGGR;
            }

            /* reset the growth delay for every reclaim */
            growtime = lbolt + (arc_grow_retry * hz);

            arc_kmem_reap_now(last_reclaim);

        } else if (arc_no_grow && lbolt >= growtime) {
            arc_no_grow = FALSE;
        }

        if (2 * arc_c < arc_size +
            arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
            arc_adjust();

        if (arc_eviction_list != NULL)
            arc_do_user_evicts();

        /* block until needed, or one second, whichever is shorter */
        CALLB_CPR_SAFE_BEGIN(&cpr);
        (void) cv_timedwait(&arc_reclaim_thr_cv,
            &arc_reclaim_thr_lock, (lbolt + hz));
        CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
    }

    arc_thread_exit = 0;
    cv_broadcast(&arc_reclaim_thr_cv);
    CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
    thread_exit();
}
/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
    int mult;

    if (state == arc_l2c_only)
        return;

    ASSERT(bytes > 0);
    /*
     * Adapt the target size of the MRU list:
     *	- if we just hit in the MRU ghost list, then increase
     *	  the target size of the MRU list.
     *	- if we just hit in the MFU ghost list, then increase
     *	  the target size of the MFU list by decreasing the
     *	  target size of the MRU list.
     */
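    /*
     * Illustrative arithmetic (not in the original source): if the
     * MFU ghost list is three times the size of the MRU ghost list,
     * a hit in the MRU ghost list grows arc_p by 3 * bytes, on the
     * theory that the MRU side is proportionally more starved.
     */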
    if (state == arc_mru_ghost) {
        mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
            1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));

        arc_p = MIN(arc_c, arc_p + bytes * mult);
    } else if (state == arc_mfu_ghost) {
        mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
            1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));

        arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
    }
    ASSERT((int64_t)arc_p >= 0);

    if (arc_reclaim_needed()) {
        cv_signal(&arc_reclaim_thr_cv);
        return;
    }

    if (arc_no_grow)
        return;

    if (arc_c >= arc_c_max)
        return;

    /*
     * If we're within (2 * maxblocksize) bytes of the target
     * cache size, increment the target cache size
     */
    if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
        atomic_add_64(&arc_c, (int64_t)bytes);
        if (arc_c > arc_c_max)
            arc_c = arc_c_max;
        else if (state == arc_anon)
            atomic_add_64(&arc_p, (int64_t)bytes);
        if (arc_p > arc_c)
            arc_p = arc_c;
    }
    ASSERT((int64_t)arc_p >= 0);
}
/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(arc_buf_contents_t type)
{
    if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
        return (1);

#ifdef _KERNEL
    /*
     * If zio data pages are being allocated out of a separate heap segment,
     * then enforce that the size of available vmem for this area remains
     * above about 1/32nd free.
     */
    if (type == ARC_BUFC_DATA && zio_arena != NULL &&
        vmem_size(zio_arena, VMEM_FREE) <
        (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
        return (1);
#endif

    if (arc_reclaim_needed())
        return (1);

    return (arc_size > arc_c);
}
/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
2026 arc_get_data_buf(arc_buf_t *buf)
2028 arc_state_t *state = buf->b_hdr->b_state;
2029 uint64_t size = buf->b_hdr->b_size;
2030 arc_buf_contents_t type = buf->b_hdr->b_type;
2032 arc_adapt(size, state);
2035 * We have not yet reached cache maximum size,
2036 * just allocate a new buffer.
2038 if (!arc_evict_needed(type)) {
2039 if (type == ARC_BUFC_METADATA) {
2040 buf->b_data = zio_buf_alloc(size);
2041 arc_space_consume(size);
2043 ASSERT(type == ARC_BUFC_DATA);
2044 buf->b_data = zio_data_buf_alloc(size);
2045 atomic_add_64(&arc_size, size);
2051 * If we are prefetching from the mfu ghost list, this buffer
2052 * will end up on the mru list; so steal space from there.
2054 if (state == arc_mfu_ghost)
2055 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2056 else if (state == arc_mru_ghost)
2057 state = arc_mru;
2059 if (state == arc_mru || state == arc_anon) {
2060 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2061 state = (arc_mfu->arcs_lsize[type] > 0 &&
2062 arc_p > mru_used) ? arc_mfu : arc_mru;
2063 } else {
2065 uint64_t mfu_space = arc_c - arc_p;
2066 state = (arc_mru->arcs_lsize[type] > 0 &&
2067 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2069 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2070 if (type == ARC_BUFC_METADATA) {
2071 buf->b_data = zio_buf_alloc(size);
2072 arc_space_consume(size);
2073 } else {
2074 ASSERT(type == ARC_BUFC_DATA);
2075 buf->b_data = zio_data_buf_alloc(size);
2076 atomic_add_64(&arc_size, size);
2078 ARCSTAT_BUMP(arcstat_recycle_miss);
2080 ASSERT(buf->b_data != NULL);
2083 * Update the state size. Note that ghost states have a
2084 * "ghost size" and so don't need to be updated.
2086 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2087 arc_buf_hdr_t *hdr = buf->b_hdr;
2089 atomic_add_64(&hdr->b_state->arcs_size, size);
2090 if (list_link_active(&hdr->b_arc_node)) {
2091 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2092 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2095 * If we are growing the cache, and we are adding anonymous
2096 * data, and we have outgrown arc_p, update arc_p
2098 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2099 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2100 arc_p = MIN(arc_c, arc_p + size);
2105 * This routine is called whenever a buffer is accessed.
2106 * NOTE: the hash lock is dropped in this function.
2109 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2111 ASSERT(MUTEX_HELD(hash_lock));
2113 if (buf->b_state == arc_anon) {
2115 * This buffer is not in the cache, and does not
2116 * appear in our "ghost" list. Add the new buffer
2117 * to the MRU state.
2120 ASSERT(buf->b_arc_access == 0);
2121 buf->b_arc_access = lbolt;
2122 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2123 arc_change_state(arc_mru, buf, hash_lock);
2125 } else if (buf->b_state == arc_mru) {
2127 * If this buffer is here because of a prefetch, then either:
2128 * - clear the flag if this is a "referencing" read
2129 * (any subsequent access will bump this into the MFU state).
2131 * - move the buffer to the head of the list if this is
2132 * another prefetch (to make it less likely to be evicted).
2134 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2135 if (refcount_count(&buf->b_refcnt) == 0) {
2136 ASSERT(list_link_active(&buf->b_arc_node));
2137 } else {
2138 buf->b_flags &= ~ARC_PREFETCH;
2139 ARCSTAT_BUMP(arcstat_mru_hits);
2141 buf->b_arc_access = lbolt;
2142 return;
2143 }
2146 * This buffer has been "accessed" only once so far,
2147 * but it is still in the cache. Move it to the MFU
2148 * state.
2150 if (lbolt > buf->b_arc_access + ARC_MINTIME) {
2152 * More than 125ms have passed since we
2153 * instantiated this buffer. Move it to the
2154 * most frequently used state.
2156 buf->b_arc_access = lbolt;
2157 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2158 arc_change_state(arc_mfu, buf, hash_lock);
2160 ARCSTAT_BUMP(arcstat_mru_hits);
2161 } else if (buf->b_state == arc_mru_ghost) {
2162 arc_state_t *new_state;
2164 * This buffer has been "accessed" recently, but
2165 * was evicted from the cache. Move it to the
2166 * MRU state.
2169 if (buf->b_flags & ARC_PREFETCH) {
2170 new_state = arc_mru;
2171 if (refcount_count(&buf->b_refcnt) > 0)
2172 buf->b_flags &= ~ARC_PREFETCH;
2173 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2175 new_state = arc_mfu;
2176 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2179 buf->b_arc_access = lbolt;
2180 arc_change_state(new_state, buf, hash_lock);
2182 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2183 } else if (buf->b_state == arc_mfu) {
2185 * This buffer has been accessed more than once and is
2186 * still in the cache. Keep it in the MFU state.
2188 * NOTE: an add_reference() that occurred when we did
2189 * the arc_read() will have kicked this off the list.
2190 * If it was a prefetch, we will explicitly move it to
2191 * the head of the list now.
2193 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2194 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2195 ASSERT(list_link_active(&buf->b_arc_node));
2197 ARCSTAT_BUMP(arcstat_mfu_hits);
2198 buf->b_arc_access = lbolt;
2199 } else if (buf->b_state == arc_mfu_ghost) {
2200 arc_state_t *new_state = arc_mfu;
2202 * This buffer has been accessed more than once but has
2203 * been evicted from the cache. Move it back to the
2204 * MFU state.
2207 if (buf->b_flags & ARC_PREFETCH) {
2209 * This is a prefetch access...
2210 * move this block back to the MRU state.
2212 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2213 new_state = arc_mru;
2216 buf->b_arc_access = lbolt;
2217 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2218 arc_change_state(new_state, buf, hash_lock);
2220 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2221 } else if (buf->b_state == arc_l2c_only) {
2223 * This buffer is on the 2nd Level ARC.
2226 buf->b_arc_access = lbolt;
2227 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2228 arc_change_state(arc_mfu, buf, hash_lock);
2230 ASSERT(!"invalid arc state");
2234 /* a generic arc_done_func_t which you can use */
2237 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2239 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2240 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2243 /* a generic arc_done_func_t */
2245 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2247 arc_buf_t **bufp = arg;
2248 if (zio && zio->io_error) {
2249 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2257 arc_read_done(zio_t *zio)
2259 arc_buf_hdr_t *hdr, *found;
2261 arc_buf_t *abuf; /* buffer we're assigning to callback */
2262 kmutex_t *hash_lock;
2263 arc_callback_t *callback_list, *acb;
2264 int freeable = FALSE;
2266 buf = zio->io_private;
2270 * The hdr was inserted into hash-table and removed from lists
2271 * prior to starting I/O. We should find this header, since
2272 * it's in the hash table, and it should be legit since it's
2273 * not possible to evict it during the I/O. The only possible
2274 * reason for it not to be found is if we were freed during the
2275 * read.
2277 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
2280 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2281 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2282 (found == hdr && HDR_L2_READING(hdr)));
2284 hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED);
2285 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2286 hdr->b_flags |= ARC_DONT_L2CACHE;
2288 /* byteswap if necessary */
2289 callback_list = hdr->b_acb;
2290 ASSERT(callback_list != NULL);
2291 if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
2292 callback_list->acb_byteswap(buf->b_data, hdr->b_size);
2294 arc_cksum_compute(buf, B_FALSE);
2296 /* create copies of the data buffer for the callers */
2297 abuf = buf;
2298 for (acb = callback_list; acb; acb = acb->acb_next) {
2299 if (acb->acb_done) {
2300 if (abuf == NULL)
2301 abuf = arc_buf_clone(buf);
2302 acb->acb_buf = abuf;
2303 abuf = NULL;
2304 }
2305 }
2307 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2308 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2310 hdr->b_flags |= ARC_BUF_AVAILABLE;
2312 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2314 if (zio->io_error != 0) {
2315 hdr->b_flags |= ARC_IO_ERROR;
2316 if (hdr->b_state != arc_anon)
2317 arc_change_state(arc_anon, hdr, hash_lock);
2318 if (HDR_IN_HASH_TABLE(hdr))
2319 buf_hash_remove(hdr);
2320 freeable = refcount_is_zero(&hdr->b_refcnt);
2321 /* convert checksum errors into IO errors */
2322 if (zio->io_error == ECKSUM)
2323 zio->io_error = EIO;
2327 * Broadcast before we drop the hash_lock to avoid the possibility
2328 * that the hdr (and hence the cv) might be freed before we get to
2329 * the cv_broadcast().
2331 cv_broadcast(&hdr->b_cv);
2335 * Only call arc_access on anonymous buffers. This is because
2336 * if we've issued an I/O for an evicted buffer, we've already
2337 * called arc_access (to prevent any simultaneous readers from
2338 * getting confused).
2340 if (zio->io_error == 0 && hdr->b_state == arc_anon)
2341 arc_access(hdr, hash_lock);
2342 mutex_exit(hash_lock);
2345 * This block was freed while we waited for the read to
2346 * complete. It has been removed from the hash table and
2347 * moved to the anonymous state (so that it won't show up
2348 * in the cache).
2350 ASSERT3P(hdr->b_state, ==, arc_anon);
2351 freeable = refcount_is_zero(&hdr->b_refcnt);
2354 /* execute each callback and free its structure */
2355 while ((acb = callback_list) != NULL) {
2357 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2359 if (acb->acb_zio_dummy != NULL) {
2360 acb->acb_zio_dummy->io_error = zio->io_error;
2361 zio_nowait(acb->acb_zio_dummy);
2364 callback_list = acb->acb_next;
2365 kmem_free(acb, sizeof (arc_callback_t));
2369 arc_hdr_destroy(hdr);
2373 * "Read" the block block at the specified DVA (in bp) via the
2374 * cache. If the block is found in the cache, invoke the provided
2375 * callback immediately and return. Note that the `zio' parameter
2376 * in the callback will be NULL in this case, since no IO was
2377 * required. If the block is not in the cache pass the read request
2378 * on to the spa with a substitute callback function, so that the
2379 * requested block will be added to the cache.
2381 * If a read request arrives for a block that has a read in-progress,
2382 * either wait for the in-progress read to complete (and return the
2383 * results); or, if this is a read with a "done" func, add a record
2384 * to the read to invoke the "done" func when the read completes,
2385 * and return; or just return.
2387 * arc_read_done() will invoke all the requested "done" functions
2388 * for readers of this block.
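*
* Editor's sketch of a typical synchronous caller (hypothetical code,
* modeled on the arc_getbuf_func() helper above):
*
*	uint32_t aflags = ARC_WAIT;
*	arc_buf_t *abuf = NULL;
*
*	(void) arc_read(NULL, spa, bp, swapfunc, arc_getbuf_func, &abuf,
*	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
*	if (abuf != NULL) {
*		... use abuf->b_data, then drop the reference ...
*		(void) arc_buf_remove_ref(abuf, &abuf);
*	}
*/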
2391 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
2392 arc_done_func_t *done, void *private, int priority, int flags,
2393 uint32_t *arc_flags, zbookmark_t *zb)
2397 kmutex_t *hash_lock;
2400 top:
2401 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2402 if (hdr && hdr->b_datacnt > 0) {
2404 *arc_flags |= ARC_CACHED;
2406 if (HDR_IO_IN_PROGRESS(hdr)) {
2408 if (*arc_flags & ARC_WAIT) {
2409 cv_wait(&hdr->b_cv, hash_lock);
2410 mutex_exit(hash_lock);
2411 goto top;
2412 }
2413 ASSERT(*arc_flags & ARC_NOWAIT);
2416 arc_callback_t *acb = NULL;
2418 acb = kmem_zalloc(sizeof (arc_callback_t),
2420 acb->acb_done = done;
2421 acb->acb_private = private;
2422 acb->acb_byteswap = swap;
2423 if (pio != NULL)
2424 acb->acb_zio_dummy = zio_null(pio,
2425 spa, NULL, NULL, flags);
2427 ASSERT(acb->acb_done != NULL);
2428 acb->acb_next = hdr->b_acb;
2429 hdr->b_acb = acb;
2430 add_reference(hdr, hash_lock, private);
2431 mutex_exit(hash_lock);
2432 return (0);
2433 }
2434 mutex_exit(hash_lock);
2435 return (0);
2436 }
2438 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2441 add_reference(hdr, hash_lock, private);
2443 * If this block is already in use, create a new
2444 * copy of the data so that we will be guaranteed
2445 * that arc_release() will always succeed.
2449 ASSERT(buf->b_data);
2450 if (HDR_BUF_AVAILABLE(hdr)) {
2451 ASSERT(buf->b_efunc == NULL);
2452 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2453 } else {
2454 buf = arc_buf_clone(buf);
2455 }
2456 } else if (*arc_flags & ARC_PREFETCH &&
2457 refcount_count(&hdr->b_refcnt) == 0) {
2458 hdr->b_flags |= ARC_PREFETCH;
2460 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2461 arc_access(hdr, hash_lock);
2462 mutex_exit(hash_lock);
2463 ARCSTAT_BUMP(arcstat_hits);
2464 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2465 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2466 data, metadata, hits);
2468 if (done)
2469 done(NULL, buf, private);
2470 } else {
2471 uint64_t size = BP_GET_LSIZE(bp);
2472 arc_callback_t *acb;
2475 /* this block is not in the cache */
2476 arc_buf_hdr_t *exists;
2477 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2478 buf = arc_buf_alloc(spa, size, private, type);
2480 hdr->b_dva = *BP_IDENTITY(bp);
2481 hdr->b_birth = bp->blk_birth;
2482 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2483 exists = buf_hash_insert(hdr, &hash_lock);
2484 if (exists) {
2485 /* somebody beat us to the hash insert */
2486 mutex_exit(hash_lock);
2487 bzero(&hdr->b_dva, sizeof (dva_t));
2490 (void) arc_buf_remove_ref(buf, private);
2491 goto top; /* restart the IO request */
2493 /* if this is a prefetch, we don't have a reference */
2494 if (*arc_flags & ARC_PREFETCH) {
2495 (void) remove_reference(hdr, hash_lock,
2496 private);
2497 hdr->b_flags |= ARC_PREFETCH;
2499 if (BP_GET_LEVEL(bp) > 0)
2500 hdr->b_flags |= ARC_INDIRECT;
2502 /* this block is in the ghost cache */
2503 ASSERT(GHOST_STATE(hdr->b_state));
2504 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2505 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2506 ASSERT(hdr->b_buf == NULL);
2508 /* if this is a prefetch, we don't have a reference */
2509 if (*arc_flags & ARC_PREFETCH)
2510 hdr->b_flags |= ARC_PREFETCH;
2512 add_reference(hdr, hash_lock, private);
2513 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2514 buf->b_hdr = hdr;
2515 buf->b_data = NULL;
2516 buf->b_efunc = NULL;
2517 buf->b_private = NULL;
2518 buf->b_next = NULL;
2519 hdr->b_buf = buf;
2520 arc_get_data_buf(buf);
2521 ASSERT(hdr->b_datacnt == 0);
2522 hdr->b_datacnt = 1;
2526 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2527 acb->acb_done = done;
2528 acb->acb_private = private;
2529 acb->acb_byteswap = swap;
2531 ASSERT(hdr->b_acb == NULL);
2533 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2536 * If the buffer has been evicted, migrate it to a present state
2537 * before issuing the I/O. Once we drop the hash-table lock,
2538 * the header will be marked as I/O in progress and have an
2539 * attached buffer. At this point, anybody who finds this
2540 * buffer ought to notice that it's legit but has a pending I/O.
2543 if (GHOST_STATE(hdr->b_state))
2544 arc_access(hdr, hash_lock);
2546 ASSERT3U(hdr->b_size, ==, size);
2547 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
2549 ARCSTAT_BUMP(arcstat_misses);
2550 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2551 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2552 data, metadata, misses);
2554 if (l2arc_ndev != 0) {
2556 * Read from the L2ARC if the following are true:
2557 * 1. This buffer has L2ARC metadata.
2558 * 2. This buffer isn't currently writing to the L2ARC.
2560 if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) {
2561 vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev;
2562 daddr_t addr = hdr->b_l2hdr->b_daddr;
2563 l2arc_read_callback_t *cb;
2565 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2566 ARCSTAT_BUMP(arcstat_l2_hits);
2568 hdr->b_flags |= ARC_L2_READING;
2569 mutex_exit(hash_lock);
2571 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2573 cb->l2rcb_buf = buf;
2574 cb->l2rcb_spa = spa;
2577 cb->l2rcb_flags = flags;
2582 rzio = zio_read_phys(pio, vd, addr, size,
2583 buf->b_data, ZIO_CHECKSUM_OFF,
2584 l2arc_read_done, cb, priority,
2585 flags | ZIO_FLAG_DONT_CACHE, B_FALSE);
2586 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2589 if (*arc_flags & ARC_WAIT)
2590 return (zio_wait(rzio));
2592 ASSERT(*arc_flags & ARC_NOWAIT);
2593 zio_nowait(rzio);
2594 return (0);
2595 } else {
2596 DTRACE_PROBE1(l2arc__miss,
2597 arc_buf_hdr_t *, hdr);
2598 ARCSTAT_BUMP(arcstat_l2_misses);
2599 if (HDR_L2_WRITING(hdr))
2600 ARCSTAT_BUMP(arcstat_l2_rw_clash);
2603 mutex_exit(hash_lock);
2605 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2606 arc_read_done, buf, priority, flags, zb);
2608 if (*arc_flags & ARC_WAIT)
2609 return (zio_wait(rzio));
2611 ASSERT(*arc_flags & ARC_NOWAIT);
2612 zio_nowait(rzio);
2613 }
2615 return (0);
2616 }
2618 * arc_read() variant to support pool traversal. If the block is already
2619 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2620 * The idea is that we don't want pool traversal filling up memory, but
2621 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
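*
* Editor's sketch of the intended calling pattern (hypothetical
* caller; the miss path in the body below returns ENOENT):
*
*	if (arc_tryread(spa, bp, data) == ENOENT) {
*		... block not cached (or read in progress): do the I/O ...
*	}
*/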
2624 arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2630 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2632 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
2633 arc_buf_t *buf = hdr->b_buf;
2636 while (buf->b_data == NULL) {
2640 bcopy(buf->b_data, data, hdr->b_size);
2641 } else {
2642 rc = ENOENT;
2643 }
2645 if (hash_mtx)
2646 mutex_exit(hash_mtx);
2648 return (rc);
2649 }
2652 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2654 ASSERT(buf->b_hdr != NULL);
2655 ASSERT(buf->b_hdr->b_state != arc_anon);
2656 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2657 buf->b_efunc = func;
2658 buf->b_private = private;
2662 * This is used by the DMU to let the ARC know that a buffer is
2663 * being evicted, so the ARC should clean up. If this arc buf
2664 * is not yet in the evicted state, it will be put there.
2667 arc_buf_evict(arc_buf_t *buf)
2670 kmutex_t *hash_lock;
2673 mutex_enter(&arc_eviction_mtx);
2674 hdr = buf->b_hdr;
2675 if (hdr == NULL) {
2677 * We are in arc_do_user_evicts().
2679 ASSERT(buf->b_data == NULL);
2680 mutex_exit(&arc_eviction_mtx);
2681 return (0);
2682 }
2683 hash_lock = HDR_LOCK(hdr);
2684 mutex_exit(&arc_eviction_mtx);
2686 mutex_enter(hash_lock);
2688 if (buf->b_data == NULL) {
2690 * We are on the eviction list.
2692 mutex_exit(hash_lock);
2693 mutex_enter(&arc_eviction_mtx);
2694 if (buf->b_hdr == NULL) {
2696 * We are already in arc_do_user_evicts().
2698 mutex_exit(&arc_eviction_mtx);
2699 return (0);
2700 } else {
2701 arc_buf_t copy = *buf; /* structure assignment */
2703 * Process this buffer now
2704 * but let arc_do_user_evicts() do the reaping.
2706 buf->b_efunc = NULL;
2707 mutex_exit(&arc_eviction_mtx);
2708 VERIFY(copy.b_efunc(&copy) == 0);
2709 return (1);
2710 }
2711 }
2713 ASSERT(buf->b_hdr == hdr);
2714 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2715 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2718 * Pull this buffer off of the hdr
2720 bufp = &hdr->b_buf;
2721 while (*bufp != buf)
2722 bufp = &(*bufp)->b_next;
2723 *bufp = buf->b_next;
2725 ASSERT(buf->b_data != NULL);
2726 arc_buf_destroy(buf, FALSE, FALSE);
2728 if (hdr->b_datacnt == 0) {
2729 arc_state_t *old_state = hdr->b_state;
2730 arc_state_t *evicted_state;
2732 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2734 evicted_state =
2735 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2737 mutex_enter(&old_state->arcs_mtx);
2738 mutex_enter(&evicted_state->arcs_mtx);
2740 arc_change_state(evicted_state, hdr, hash_lock);
2741 ASSERT(HDR_IN_HASH_TABLE(hdr));
2742 hdr->b_flags |= ARC_IN_HASH_TABLE;
2743 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2745 mutex_exit(&evicted_state->arcs_mtx);
2746 mutex_exit(&old_state->arcs_mtx);
2748 mutex_exit(hash_lock);
2750 VERIFY(buf->b_efunc(buf) == 0);
2751 buf->b_efunc = NULL;
2752 buf->b_private = NULL;
2753 buf->b_hdr = NULL;
2754 kmem_cache_free(buf_cache, buf);
2756 return (1);
2757 }
2759 * Release this buffer from the cache. This must be done
2760 * after a read and prior to modifying the buffer contents.
2761 * If the buffer has more than one reference, we must make
2762 * a new hdr for the buffer.
2765 arc_release(arc_buf_t *buf, void *tag)
2767 arc_buf_hdr_t *hdr = buf->b_hdr;
2768 kmutex_t *hash_lock = HDR_LOCK(hdr);
2769 l2arc_buf_hdr_t *l2hdr = NULL;
2772 /* this buffer is not on any list */
2773 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2775 if (hdr->b_state == arc_anon) {
2776 /* this buffer is already released */
2777 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2778 ASSERT(BUF_EMPTY(hdr));
2779 ASSERT(buf->b_efunc == NULL);
2784 mutex_enter(hash_lock);
2787 * Do we have more than one buf?
2789 if (hdr->b_buf != buf || buf->b_next != NULL) {
2790 arc_buf_hdr_t *nhdr;
2792 uint64_t blksz = hdr->b_size;
2793 spa_t *spa = hdr->b_spa;
2794 arc_buf_contents_t type = hdr->b_type;
2795 uint32_t flags = hdr->b_flags;
2797 ASSERT(hdr->b_datacnt > 1);
2799 * Pull the data off of this buf and attach it to
2800 * a new anonymous buf.
2802 (void) remove_reference(hdr, hash_lock, tag);
2804 while (*bufp != buf)
2805 bufp = &(*bufp)->b_next;
2806 *bufp = (*bufp)->b_next;
2809 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
2810 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
2811 if (refcount_is_zero(&hdr->b_refcnt)) {
2812 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
2813 ASSERT3U(*size, >=, hdr->b_size);
2814 atomic_add_64(size, -hdr->b_size);
2816 hdr->b_datacnt -= 1;
2817 if (hdr->b_l2hdr != NULL) {
2818 mutex_enter(&l2arc_buflist_mtx);
2819 l2hdr = hdr->b_l2hdr;
2820 hdr->b_l2hdr = NULL;
2821 buf_size = hdr->b_size;
2823 arc_cksum_verify(buf);
2825 mutex_exit(hash_lock);
2827 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
2828 nhdr->b_size = blksz;
2830 nhdr->b_type = type;
2832 nhdr->b_state = arc_anon;
2833 nhdr->b_arc_access = 0;
2834 nhdr->b_flags = flags & ARC_L2_WRITING;
2835 nhdr->b_l2hdr = NULL;
2836 nhdr->b_datacnt = 1;
2837 nhdr->b_freeze_cksum = NULL;
2838 (void) refcount_add(&nhdr->b_refcnt, tag);
2840 atomic_add_64(&arc_anon->arcs_size, blksz);
2842 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2843 ASSERT(!list_link_active(&hdr->b_arc_node));
2844 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2845 arc_change_state(arc_anon, hdr, hash_lock);
2846 hdr->b_arc_access = 0;
2847 if (hdr->b_l2hdr != NULL) {
2848 mutex_enter(&l2arc_buflist_mtx);
2849 l2hdr = hdr->b_l2hdr;
2850 hdr->b_l2hdr = NULL;
2851 buf_size = hdr->b_size;
2853 mutex_exit(hash_lock);
2855 bzero(&hdr->b_dva, sizeof (dva_t));
2860 buf->b_efunc = NULL;
2861 buf->b_private = NULL;
2864 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
2865 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
2866 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
2868 if (MUTEX_HELD(&l2arc_buflist_mtx))
2869 mutex_exit(&l2arc_buflist_mtx);
2873 arc_released(arc_buf_t *buf)
2875 return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
2879 arc_has_callback(arc_buf_t *buf)
2881 return (buf->b_efunc != NULL);
2886 arc_referenced(arc_buf_t *buf)
2888 return (refcount_count(&buf->b_hdr->b_refcnt));
2893 arc_write_ready(zio_t *zio)
2895 arc_write_callback_t *callback = zio->io_private;
2896 arc_buf_t *buf = callback->awcb_buf;
2897 arc_buf_hdr_t *hdr = buf->b_hdr;
2899 if (zio->io_error == 0 && callback->awcb_ready) {
2900 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
2901 callback->awcb_ready(zio, buf, callback->awcb_private);
2904 * If the IO is already in progress, then this is a re-write
2905 * attempt, so we need to thaw and re-compute the cksum. It is
2906 * the responsibility of the callback to handle the freeing
2907 * and accounting for any re-write attempt. If we don't have a
2908 * callback registered then simply free the block here.
2910 if (HDR_IO_IN_PROGRESS(hdr)) {
2911 if (!BP_IS_HOLE(&zio->io_bp_orig) &&
2912 callback->awcb_ready == NULL) {
2913 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
2914 &zio->io_bp_orig, NULL, NULL));
2916 mutex_enter(&hdr->b_freeze_lock);
2917 if (hdr->b_freeze_cksum != NULL) {
2918 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
2919 hdr->b_freeze_cksum = NULL;
2921 mutex_exit(&hdr->b_freeze_lock);
2923 arc_cksum_compute(buf, B_FALSE);
2924 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2928 arc_write_done(zio_t *zio)
2930 arc_write_callback_t *callback = zio->io_private;
2931 arc_buf_t *buf = callback->awcb_buf;
2932 arc_buf_hdr_t *hdr = buf->b_hdr;
2936 /* this buffer is on no lists and is not in the hash table */
2937 ASSERT3P(hdr->b_state, ==, arc_anon);
2939 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2940 hdr->b_birth = zio->io_bp->blk_birth;
2941 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2943 * If the block to be written was all-zero, we may have
2944 * compressed it away. In this case no write was performed
2945 * so there will be no dva/birth-date/checksum. The buffer
2946 * must therefore remain anonymous (and uncached).
2948 if (!BUF_EMPTY(hdr)) {
2949 arc_buf_hdr_t *exists;
2950 kmutex_t *hash_lock;
2952 arc_cksum_verify(buf);
2954 exists = buf_hash_insert(hdr, &hash_lock);
2955 if (exists) {
2957 * This can only happen if we overwrite for
2958 * sync-to-convergence, because we remove
2959 * buffers from the hash table when we arc_free().
2961 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2962 BP_IDENTITY(zio->io_bp)));
2963 ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2964 zio->io_bp->blk_birth);
2966 ASSERT(refcount_is_zero(&exists->b_refcnt));
2967 arc_change_state(arc_anon, exists, hash_lock);
2968 mutex_exit(hash_lock);
2969 arc_hdr_destroy(exists);
2970 exists = buf_hash_insert(hdr, &hash_lock);
2971 ASSERT3P(exists, ==, NULL);
2973 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2974 arc_access(hdr, hash_lock);
2975 mutex_exit(hash_lock);
2976 } else if (callback->awcb_done == NULL) {
2979 * This is an anonymous buffer with no user callback,
2980 * destroy it if there are no active references.
2982 mutex_enter(&arc_eviction_mtx);
2983 destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
2984 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2985 mutex_exit(&arc_eviction_mtx);
2986 if (destroy_hdr)
2987 arc_hdr_destroy(hdr);
2988 } else {
2989 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2990 }
2992 if (callback->awcb_done) {
2993 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2994 callback->awcb_done(zio, buf, callback->awcb_private);
2997 kmem_free(callback, sizeof (arc_write_callback_t));
3001 arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
3002 uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
3003 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
3004 int flags, zbookmark_t *zb)
3006 arc_buf_hdr_t *hdr = buf->b_hdr;
3007 arc_write_callback_t *callback;
3010 /* this is a private buffer - no locking required */
3011 ASSERT3P(hdr->b_state, ==, arc_anon);
3012 ASSERT(BUF_EMPTY(hdr));
3013 ASSERT(!HDR_IO_ERROR(hdr));
3014 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3015 ASSERT(hdr->b_acb == 0);
3016 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3017 callback->awcb_ready = ready;
3018 callback->awcb_done = done;
3019 callback->awcb_private = private;
3020 callback->awcb_buf = buf;
3021 zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
3022 buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback,
3023 priority, flags, zb);
3025 return (zio);
3026 }
3029 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
3030 zio_done_func_t *done, void *private, uint32_t arc_flags)
3033 kmutex_t *hash_lock;
3037 * If this buffer is in the cache, release it, so it
3040 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
3043 * The checksum of blocks to free is not always
3044 * preserved (eg. on the deadlist). However, if it is
3045 * nonzero, it should match what we have in the cache.
3047 ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
3048 ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
3049 if (ab->b_state != arc_anon)
3050 arc_change_state(arc_anon, ab, hash_lock);
3051 if (HDR_IO_IN_PROGRESS(ab)) {
3053 * This should only happen when we prefetch.
3055 ASSERT(ab->b_flags & ARC_PREFETCH);
3056 ASSERT3U(ab->b_datacnt, ==, 1);
3057 ab->b_flags |= ARC_FREED_IN_READ;
3058 if (HDR_IN_HASH_TABLE(ab))
3059 buf_hash_remove(ab);
3060 ab->b_arc_access = 0;
3061 bzero(&ab->b_dva, sizeof (dva_t));
3064 ab->b_buf->b_efunc = NULL;
3065 ab->b_buf->b_private = NULL;
3066 mutex_exit(hash_lock);
3067 } else if (refcount_is_zero(&ab->b_refcnt)) {
3068 ab->b_flags |= ARC_FREE_IN_PROGRESS;
3069 mutex_exit(hash_lock);
3070 arc_hdr_destroy(ab);
3071 ARCSTAT_BUMP(arcstat_deleted);
3074 * We still have an active reference on this
3075 * buffer. This can happen, e.g., from
3076 * dbuf_unoverride().
3078 ASSERT(!HDR_IN_HASH_TABLE(ab));
3079 ab->b_arc_access = 0;
3080 bzero(&ab->b_dva, sizeof (dva_t));
3083 ab->b_buf->b_efunc = NULL;
3084 ab->b_buf->b_private = NULL;
3085 mutex_exit(hash_lock);
3089 zio = zio_free(pio, spa, txg, bp, done, private);
3091 if (arc_flags & ARC_WAIT)
3092 return (zio_wait(zio));
3094 ASSERT(arc_flags & ARC_NOWAIT);
3095 zio_nowait(zio);
3097 return (0);
3098 }
3101 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3104 uint64_t inflight_data = arc_anon->arcs_size;
3105 uint64_t available_memory = ptob(freemem);
3106 static uint64_t page_load = 0;
3107 static uint64_t last_txg = 0;
3109 #if defined(__i386)
3110 available_memory =
3111 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3112 #endif
3113 if (available_memory >= zfs_write_limit_max)
3114 return (0);
3116 if (txg > last_txg) {
3117 last_txg = txg;
3118 page_load = 0;
3119 }
3121 * If we are in pageout, we know that memory is already tight,
3122 * the arc is already going to be evicting, so we just want to
3123 * continue to let page writes occur as quickly as possible.
3125 if (curproc == proc_pageout) {
3126 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3127 return (ERESTART);
3128 /* Note: reserve is inflated, so we deflate */
3129 page_load += reserve / 8;
3130 return (0);
3131 } else if (page_load > 0 && arc_reclaim_needed()) {
3132 /* memory is low, delay before restarting */
3133 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3134 return (EAGAIN);
3135 }
3136 page_load = 0;
3138 if (arc_size > arc_c_min) {
3139 uint64_t evictable_memory =
3140 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3141 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3142 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3143 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3144 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3147 if (inflight_data > available_memory / 4) {
3148 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3149 return (ERESTART);
3150 }
3152 return (0);
3153 }
3156 arc_tempreserve_clear(uint64_t reserve)
3158 atomic_add_64(&arc_tempreserve, -reserve);
3159 ASSERT((int64_t)arc_tempreserve >= 0);
3163 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3169 * Once in a while, fail for no reason. Everything should cope.
3171 if (spa_get_random(10000) == 0) {
3172 dprintf("forcing random failure\n");
3176 if (reserve > arc_c/4 && !arc_no_grow)
3177 arc_c = MIN(arc_c_max, reserve * 4);
3178 if (reserve > arc_c)
3179 return (ENOMEM);
3182 * Writes will, almost always, require additional memory allocations
3183 * in order to compress/encrypt/etc the data. We therefore need to
3184 * make sure that there is sufficient available memory for this.
3186 if (error = arc_memory_throttle(reserve, txg))
3187 return (error);
3190 * Throttle writes when the amount of dirty data in the cache
3191 * gets too large. We try to keep the cache less than half full
3192 * of dirty blocks so that our sync times don't grow too large.
3193 * Note: if two requests come in concurrently, we might let them
3194 * both succeed, when one of them should fail. Not a huge deal.
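*
* Editor's example with made-up numbers: with arc_c at 1GB, a reserve
* is refused once reserve + arc_tempreserve + anonymous bytes exceed
* 512MB (arc_c / 2) while anonymous data alone exceeds 256MB
* (arc_c / 4).
*/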
3196 if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
3197 arc_anon->arcs_size > arc_c / 4) {
3198 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3199 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3200 arc_tempreserve>>10,
3201 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3202 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3203 reserve>>10, arc_c>>10);
3204 return (ERESTART);
3205 }
3206 atomic_add_64(&arc_tempreserve, reserve);
3207 return (0);
3208 }
3213 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3214 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3216 /* Convert seconds to clock ticks */
3217 arc_min_prefetch_lifespan = 1 * hz;
3219 /* Start out with 1/8 of all memory */
3220 arc_c = physmem * PAGESIZE / 8;
3224 * On architectures where the physical memory can be larger
3225 * than the addressable space (intel in 32-bit mode), we may
3226 * need to limit the cache to 1/8 of VM size.
3228 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3231 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3232 arc_c_min = MAX(arc_c / 4, 64<<20);
3233 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3234 if (arc_c * 8 >= 1<<30)
3235 arc_c_max = (arc_c * 8) - (1<<30);
3236 else
3237 arc_c_max = arc_c_min;
3238 arc_c_max = MAX(arc_c * 6, arc_c_max);
3241 * Allow the tunables to override our calculations if they are
3242 * reasonable (i.e. over 64MB)
3244 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3245 arc_c_max = zfs_arc_max;
3246 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3247 arc_c_min = zfs_arc_min;
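
/*
 * Editor's note: on Solaris these tunables are normally set from
 * /etc/system, e.g. (illustrative values):
 *
 *	set zfs:zfs_arc_max = 0x80000000	(cap the ARC at 2GB)
 *	set zfs:zfs_arc_min = 0x10000000	(256MB floor)
 */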
3250 arc_p = (arc_c >> 1);
3252 /* limit meta-data to 1/4 of the arc capacity */
3253 arc_meta_limit = arc_c_max / 4;
3255 /* Allow the tunable to override if it is reasonable */
3256 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3257 arc_meta_limit = zfs_arc_meta_limit;
3259 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3260 arc_c_min = arc_meta_limit / 2;
3262 /* if kmem_flags are set, lets try to use less memory */
3263 if (kmem_debugging())
3264 arc_c = arc_c / 2;
3265 if (arc_c < arc_c_min)
3266 arc_c = arc_c_min;
3268 arc_anon = &ARC_anon;
3269 arc_mru = &ARC_mru;
3270 arc_mru_ghost = &ARC_mru_ghost;
3271 arc_mfu = &ARC_mfu;
3272 arc_mfu_ghost = &ARC_mfu_ghost;
3273 arc_l2c_only = &ARC_l2c_only;
3276 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3277 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3278 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3279 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3280 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3281 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3283 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3284 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3285 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3286 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3287 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3288 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3289 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3290 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3291 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3292 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3293 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3294 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3295 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3296 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3297 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3298 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3299 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3300 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3301 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3302 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3306 arc_thread_exit = 0;
3307 arc_eviction_list = NULL;
3308 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3309 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3311 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3312 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3314 if (arc_ksp != NULL) {
3315 arc_ksp->ks_data = &arc_stats;
3316 kstat_install(arc_ksp);
3319 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3320 TS_RUN, minclsyspri);
3324 if (zfs_write_limit_max == 0)
3325 zfs_write_limit_max = physmem * PAGESIZE >>
3326 zfs_write_limit_shift;
3327 else
3328 zfs_write_limit_shift = 0;
3334 mutex_enter(&arc_reclaim_thr_lock);
3335 arc_thread_exit = 1;
3336 while (arc_thread_exit != 0)
3337 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3338 mutex_exit(&arc_reclaim_thr_lock);
3344 if (arc_ksp != NULL) {
3345 kstat_delete(arc_ksp);
3346 arc_ksp = NULL;
3347 }
3349 mutex_destroy(&arc_eviction_mtx);
3350 mutex_destroy(&arc_reclaim_thr_lock);
3351 cv_destroy(&arc_reclaim_thr_cv);
3353 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3354 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3355 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3356 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3357 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3358 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3359 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3360 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3362 mutex_destroy(&arc_anon->arcs_mtx);
3363 mutex_destroy(&arc_mru->arcs_mtx);
3364 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3365 mutex_destroy(&arc_mfu->arcs_mtx);
3366 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3374 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3375 * It uses dedicated storage devices to hold cached data, which are populated
3376 * using large infrequent writes. The main role of this cache is to boost
3377 * the performance of random read workloads. The intended L2ARC devices
3378 * include short-stroked disks, solid state disks, and other media with
3379 * substantially faster read latency than disk.
3381 * +-----------------------+
3383 * +-----------------------+
3386 * l2arc_feed_thread() arc_read()
3390 * +---------------+ |
3392 * +---------------+ |
3397 * +-------+ +-------+
3399 * | cache | | cache |
3400 * +-------+ +-------+
3401 * +=========+ .-----.
3402 * : L2ARC : |-_____-|
3403 * : devices : | Disks |
3404 * +=========+ `-_____-'
3406 * Read requests are satisfied from the following sources, in order:
3409 * 2) vdev cache of L2ARC devices
3411 * 4) vdev cache of disks
3414 * Some L2ARC device types exhibit extremely slow write performance.
3415 * To accommodate this there are some significant differences between
3416 * the L2ARC and traditional cache design:
3418 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3419 * the ARC behave as usual, freeing buffers and placing headers on ghost
3420 * lists. The ARC does not send buffers to the L2ARC during eviction as
3421 * this would add inflated write latencies for all ARC memory pressure.
3423 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3424 * It does this by periodically scanning buffers from the eviction-end of
3425 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3426 * not already there. It scans until a headroom of buffers is satisfied,
3427 * which itself is a buffer for ARC eviction. The thread that does this is
3428 * l2arc_feed_thread(), illustrated below; example sizes are included to
3429 * provide a better sense of ratio than this diagram:
3432 * +---------------------+----------+
3433 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3434 * +---------------------+----------+ | o L2ARC eligible
3435 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3436 * +---------------------+----------+ |
3437 * 15.9 Gbytes ^ 32 Mbytes |
3439 * l2arc_feed_thread()
3441 * l2arc write hand <--[oooo]--'
3445 * +==============================+
3446 * L2ARC dev |####|#|###|###| |####| ... |
3447 * +==============================+
3450 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3451 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3452 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3453 * safe to say that this is an uncommon case, since buffers at the end of
3454 * the ARC lists have moved there due to inactivity.
3456 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3457 * then the L2ARC simply misses copying some buffers. This serves as a
3458 * pressure valve to prevent heavy read workloads from both stalling the ARC
3459 * with waits and clogging the L2ARC with writes. This also helps prevent
3460 * the potential for the L2ARC to churn if it attempts to cache content too
3461 * quickly, such as during backups of the entire pool.
3463 * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3464 * the vdev queue can aggregate them into larger and fewer writes. Each
3465 * device is written to in a rotor fashion, sweeping writes through
3466 * available space then repeating.
3468 * 6. The L2ARC does not store dirty content. It never needs to flush
3469 * write buffers back to disk based storage.
3471 * 7. If an ARC buffer is written (and dirtied) which also exists in the
3472 * L2ARC, the now stale L2ARC buffer is immediately dropped.
3474 * The performance of the L2ARC can be tweaked by a number of tunables, which
3475 * may be necessary for different workloads:
3477 * l2arc_write_max max write bytes per interval
3478 * l2arc_noprefetch skip caching prefetched buffers
3479 * l2arc_headroom number of max device writes to precache
3480 * l2arc_feed_secs seconds between L2ARC writing
3482 * Tunables may be removed or added as future performance improvements are
3483 * integrated, and also may become zpool properties.
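*
* Editor's note: like the ARC tunables above, these may be adjusted
* from /etc/system, e.g. (illustrative values):
*
*	set zfs:l2arc_noprefetch = 0	(also cache prefetched buffers)
*	set zfs:l2arc_write_max = 0x1000000	(16MB per feed interval)
*/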
3487 l2arc_hdr_stat_add(void)
3489 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3490 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3494 l2arc_hdr_stat_remove(void)
3496 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3497 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3501 * Cycle through L2ARC devices. This is how L2ARC load balances.
3502 * This is called with l2arc_dev_mtx held, which also locks out spa removal.
3504 static l2arc_dev_t *
3505 l2arc_dev_get_next(void)
3509 if (l2arc_dev_last == NULL) {
3510 next = list_head(l2arc_dev_list);
3511 } else {
3512 next = list_next(l2arc_dev_list, l2arc_dev_last);
3513 if (next == NULL)
3514 next = list_head(l2arc_dev_list);
3515 }
3517 l2arc_dev_last = next;
3523 * A write to a cache device has completed. Update all headers to allow
3524 * reads from these buffers to begin.
3527 l2arc_write_done(zio_t *zio)
3529 l2arc_write_callback_t *cb;
3532 l2arc_data_free_t *df, *df_prev;
3533 arc_buf_hdr_t *head, *ab, *ab_prev;
3534 kmutex_t *hash_lock;
3536 cb = zio->io_private;
3538 dev = cb->l2wcb_dev;
3539 ASSERT(dev != NULL);
3540 head = cb->l2wcb_head;
3541 ASSERT(head != NULL);
3542 buflist = dev->l2ad_buflist;
3543 ASSERT(buflist != NULL);
3544 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3545 l2arc_write_callback_t *, cb);
3547 if (zio->io_error != 0)
3548 ARCSTAT_BUMP(arcstat_l2_writes_error);
3550 mutex_enter(&l2arc_buflist_mtx);
3553 * All writes completed, or an error was hit.
3555 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3556 ab_prev = list_prev(buflist, ab);
3558 hash_lock = HDR_LOCK(ab);
3559 if (!mutex_tryenter(hash_lock)) {
3561 * This buffer misses out. It may be in a stage
3562 * of eviction. Its ARC_L2_WRITING flag will be
3563 * left set, denying reads to this buffer.
3565 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3566 continue;
3567 }
3569 if (zio->io_error != 0) {
3571 * Error - invalidate L2ARC entry.
3577 * Allow ARC to begin reads to this L2ARC entry.
3579 ab->b_flags &= ~ARC_L2_WRITING;
3581 mutex_exit(hash_lock);
3584 atomic_inc_64(&l2arc_writes_done);
3585 list_remove(buflist, head);
3586 kmem_cache_free(hdr_cache, head);
3587 mutex_exit(&l2arc_buflist_mtx);
3590 * Free buffers that were tagged for destruction.
3592 mutex_enter(&l2arc_free_on_write_mtx);
3593 buflist = l2arc_free_on_write;
3594 for (df = list_tail(buflist); df; df = df_prev) {
3595 df_prev = list_prev(buflist, df);
3596 ASSERT(df->l2df_data != NULL);
3597 ASSERT(df->l2df_func != NULL);
3598 df->l2df_func(df->l2df_data, df->l2df_size);
3599 list_remove(buflist, df);
3600 kmem_free(df, sizeof (l2arc_data_free_t));
3602 mutex_exit(&l2arc_free_on_write_mtx);
3604 kmem_free(cb, sizeof (l2arc_write_callback_t));
3608 * A read to a cache device completed. Validate buffer contents before
3609 * handing over to the regular ARC routines.
3612 l2arc_read_done(zio_t *zio)
3614 l2arc_read_callback_t *cb;
3618 kmutex_t *hash_lock;
3621 cb = zio->io_private;
3623 buf = cb->l2rcb_buf;
3624 ASSERT(buf != NULL);
3626 ASSERT(hdr != NULL);
3628 hash_lock = HDR_LOCK(hdr);
3629 mutex_enter(hash_lock);
3632 * Check this survived the L2ARC journey.
3634 equal = arc_cksum_equal(buf);
3635 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
3636 mutex_exit(hash_lock);
3637 zio->io_private = buf;
3638 arc_read_done(zio);
3639 } else {
3640 mutex_exit(hash_lock);
3642 * Buffer didn't survive caching. Increment stats and
3643 * reissue to the original storage device.
3645 if (zio->io_error != 0)
3646 ARCSTAT_BUMP(arcstat_l2_io_error);
3648 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
3650 zio->io_flags &= ~ZIO_FLAG_DONT_CACHE;
3651 rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp,
3652 buf->b_data, zio->io_size, arc_read_done, buf,
3653 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb);
3656 * Since this is a separate thread, we can wait on this
3657 * I/O whether there is an io_waiter or not.
3659 err = zio_wait(rzio);
3662 * Let the resent I/O call arc_read_done() instead.
3663 * io_error is set to the reissued I/O error status.
3665 zio->io_done = NULL;
3666 zio->io_waiter = NULL;
3667 zio->io_error = err;
3670 kmem_free(cb, sizeof (l2arc_read_callback_t));
3674 * This is the list priority from which the L2ARC will search for pages to
3675 * cache. This is used within loops (0..3) to cycle through lists in the
3676 * desired order. This order can have a significant effect on cache
3677 * performance.
3679 * Currently the metadata lists are hit first, MFU then MRU, followed by
3680 * the data lists. This function returns a locked list, and also returns
3681 * the lock pointer.
3684 l2arc_list_locked(int list_num, kmutex_t **lock)
3688 ASSERT(list_num >= 0 && list_num <= 3);
3692 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
3693 *lock = &arc_mfu->arcs_mtx;
3696 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
3697 *lock = &arc_mru->arcs_mtx;
3700 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
3701 *lock = &arc_mfu->arcs_mtx;
3704 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
3705 *lock = &arc_mru->arcs_mtx;
3709 ASSERT(!(MUTEX_HELD(*lock)));
3715 * Evict buffers from the device write hand to the distance specified in
3716 * bytes. This distance may span populated buffers, it may span nothing.
3717 * This is clearing a region on the L2ARC device ready for writing.
3718 * If the 'all' boolean is set, every buffer is evicted.
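*
* Editor's example with made-up offsets: with the write hand at 900MB
* on a 1GB device and a distance of 64MB, buffers whose b_daddr lies
* in [900MB, 964MB) are evicted; had the hand been within
* 2 * l2ad_write of l2ad_end, eviction would instead run to l2ad_end,
* anticipating the hand wrapping back to l2ad_start.
*/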
3721 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
3724 l2arc_buf_hdr_t *abl2;
3725 arc_buf_hdr_t *ab, *ab_prev;
3726 kmutex_t *hash_lock;
3729 ASSERT(MUTEX_HELD(&l2arc_dev_mtx));
3731 buflist = dev->l2ad_buflist;
3733 if (buflist == NULL)
3734 return;
3736 if (!all && dev->l2ad_first) {
3738 * This is the first sweep through the device. There is
3739 * nothing to evict yet.
3740 */
3741 return;
3742 }
3744 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) {
3746 * When nearing the end of the device, evict to the end
3747 * before the device write hand jumps to the start.
3749 taddr = dev->l2ad_end;
3751 taddr = dev->l2ad_hand + distance;
3753 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
3754 uint64_t, taddr, boolean_t, all);
3756 top:
3757 mutex_enter(&l2arc_buflist_mtx);
3758 for (ab = list_tail(buflist); ab; ab = ab_prev) {
3759 ab_prev = list_prev(buflist, ab);
3761 hash_lock = HDR_LOCK(ab);
3762 if (!mutex_tryenter(hash_lock)) {
3764 * Missed the hash lock. Retry.
3766 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
3767 mutex_exit(&l2arc_buflist_mtx);
3768 mutex_enter(hash_lock);
3769 mutex_exit(hash_lock);
3770 goto top;
3771 }
3773 if (HDR_L2_WRITE_HEAD(ab)) {
3775 * We hit a write head node. Leave it for
3776 * l2arc_write_done().
3778 list_remove(buflist, ab);
3779 mutex_exit(hash_lock);
3780 continue;
3781 }
3783 if (!all && ab->b_l2hdr != NULL &&
3784 (ab->b_l2hdr->b_daddr > taddr ||
3785 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
3787 * We've evicted to the target address,
3788 * or the end of the device.
3790 mutex_exit(hash_lock);
3791 break;
3792 }
3794 if (HDR_FREE_IN_PROGRESS(ab)) {
3796 * Already on the path to destruction.
3798 mutex_exit(hash_lock);
3799 continue;
3800 }
3802 if (ab->b_state == arc_l2c_only) {
3803 ASSERT(!HDR_L2_READING(ab));
3805 * This doesn't exist in the ARC. Destroy.
3806 * arc_hdr_destroy() will call list_remove()
3807 * and decrement arcstat_l2_size.
3809 arc_change_state(arc_anon, ab, hash_lock);
3810 arc_hdr_destroy(ab);
3811 } else {
3813 * Tell ARC this no longer exists in L2ARC.
3815 if (ab->b_l2hdr != NULL) {
3816 abl2 = ab->b_l2hdr;
3817 ab->b_l2hdr = NULL;
3818 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3819 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3821 list_remove(buflist, ab);
3824 * This may have been leftover after a
3825 * failed write.
3827 ab->b_flags &= ~ARC_L2_WRITING;
3830 * Invalidate issued or about to be issued
3831 * reads, since we may be about to write
3832 * over this location.
3834 if (HDR_L2_READING(ab)) {
3835 ARCSTAT_BUMP(arcstat_l2_evict_reading);
3836 ab->b_flags |= ARC_L2_EVICTED;
3839 mutex_exit(hash_lock);
3841 mutex_exit(&l2arc_buflist_mtx);
3843 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
3844 dev->l2ad_evict = taddr;
3848 * Find and write ARC buffers to the L2ARC device.
3850 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
3851 * for reading until they have completed writing.
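*
* Editor's note on the sizing below (illustrative numbers): with
* l2arc_write_max at 8MB and l2arc_headroom at 4, one invocation
* writes at most 8MB (target_sz) and scans at most 32MB of buffers
* (headroom) from the tail of each list before giving up.
*/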
3854 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev)
3856 arc_buf_hdr_t *ab, *ab_prev, *head;
3857 l2arc_buf_hdr_t *hdrl2;
3859 uint64_t passed_sz, write_sz, buf_sz;
3860 uint64_t target_sz = dev->l2ad_write;
3861 uint64_t headroom = dev->l2ad_write * l2arc_headroom;
3863 kmutex_t *hash_lock, *list_lock;
3864 boolean_t have_lock, full;
3865 l2arc_write_callback_t *cb;
3868 ASSERT(MUTEX_HELD(&l2arc_dev_mtx));
3869 ASSERT(dev->l2ad_vdev != NULL);
3874 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3875 head->b_flags |= ARC_L2_WRITE_HEAD;
3878 * Copy buffers for L2ARC writing.
3880 mutex_enter(&l2arc_buflist_mtx);
3881 for (int try = 0; try <= 3; try++) {
3882 list = l2arc_list_locked(try, &list_lock);
3885 for (ab = list_tail(list); ab; ab = ab_prev) {
3886 ab_prev = list_prev(list, ab);
3888 hash_lock = HDR_LOCK(ab);
3889 have_lock = MUTEX_HELD(hash_lock);
3890 if (!have_lock && !mutex_tryenter(hash_lock)) {
3892 * Skip this buffer rather than waiting.
3897 passed_sz += ab->b_size;
3898 if (passed_sz > headroom) {
3899 /*
3900 * Searched too far.
3901 */
3902 mutex_exit(hash_lock);
3903 break;
3904 }
3906 if (ab->b_spa != spa) {
3907 mutex_exit(hash_lock);
3908 continue;
3909 }
3911 if (ab->b_l2hdr != NULL) {
3915 mutex_exit(hash_lock);
3916 continue;
3917 }
3919 if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) {
3920 mutex_exit(hash_lock);
3921 continue;
3922 }
3924 if ((write_sz + ab->b_size) > target_sz) {
3925 full = B_TRUE;
3926 mutex_exit(hash_lock);
3927 break;
3928 }
3930 if (ab->b_buf == NULL) {
3931 DTRACE_PROBE1(l2arc__buf__null, void *, ab);
3932 mutex_exit(hash_lock);
3933 continue;
3934 }
3938 * Insert a dummy header on the buflist so
3939 * l2arc_write_done() can find where the
3940 * write buffers begin without searching.
3942 list_insert_head(dev->l2ad_buflist, head);
3944 cb = kmem_alloc(
3945 sizeof (l2arc_write_callback_t), KM_SLEEP);
3946 cb->l2wcb_dev = dev;
3947 cb->l2wcb_head = head;
3948 pio = zio_root(spa, l2arc_write_done, cb,
3949 ZIO_FLAG_CANFAIL);
3950 }
3953 * Create and add a new L2ARC header.
3955 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
3956 hdrl2->b_dev = dev;
3957 hdrl2->b_daddr = dev->l2ad_hand;
3959 ab->b_flags |= ARC_L2_WRITING;
3960 ab->b_l2hdr = hdrl2;
3961 list_insert_head(dev->l2ad_buflist, ab);
3962 buf_data = ab->b_buf->b_data;
3963 buf_sz = ab->b_size;
3966 * Compute and store the buffer cksum before
3967 * writing. On debug the cksum is verified first.
3969 arc_cksum_verify(ab->b_buf);
3970 arc_cksum_compute(ab->b_buf, B_TRUE);
3972 mutex_exit(hash_lock);
3974 wzio = zio_write_phys(pio, dev->l2ad_vdev,
3975 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
3976 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
3977 ZIO_FLAG_CANFAIL, B_FALSE);
3979 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
3981 (void) zio_nowait(wzio);
3983 write_sz += buf_sz;
3984 dev->l2ad_hand += buf_sz;
3987 mutex_exit(list_lock);
3989 if (full == B_TRUE)
3990 break;
3991 }
3992 mutex_exit(&l2arc_buflist_mtx);
3994 if (pio == NULL) {
3995 ASSERT3U(write_sz, ==, 0);
3996 kmem_cache_free(hdr_cache, head);
3997 return;
3998 }
4000 ASSERT3U(write_sz, <=, target_sz);
4001 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4002 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4003 spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);
4006 * Bump device hand to the device start if it is approaching the end.
4007 * l2arc_evict() will already have evicted ahead for this case.
4009 if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) {
4010 spa_l2cache_space_update(dev->l2ad_vdev, 0,
4011 dev->l2ad_end - dev->l2ad_hand);
4012 dev->l2ad_hand = dev->l2ad_start;
4013 dev->l2ad_evict = dev->l2ad_start;
4014 dev->l2ad_first = B_FALSE;
4017 (void) zio_wait(pio);
4021 * This thread feeds the L2ARC at regular intervals. This is the beating
4022 * heart of the L2ARC.
4025 l2arc_feed_thread(void)
4031 boolean_t startup = B_TRUE;
4033 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4035 mutex_enter(&l2arc_feed_thr_lock);
4037 while (l2arc_thread_exit == 0) {
4039 * Initially pause for L2ARC_FEED_DELAY seconds as a grace
4040 * interval during boot, followed by l2arc_feed_secs seconds
4041 * thereafter.
4043 CALLB_CPR_SAFE_BEGIN(&cpr);
4044 if (startup) {
4045 interval = L2ARC_FEED_DELAY;
4046 startup = B_FALSE;
4047 } else {
4048 interval = l2arc_feed_secs;
4049 }
4050 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4051 lbolt + (hz * interval));
4052 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4055 * Do nothing until L2ARC devices exist.
4057 mutex_enter(&l2arc_dev_mtx);
4058 if (l2arc_ndev == 0) {
4059 mutex_exit(&l2arc_dev_mtx);
4060 continue;
4061 }
4064 * Avoid contributing to memory pressure.
4066 if (arc_reclaim_needed()) {
4067 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4068 mutex_exit(&l2arc_dev_mtx);
4069 continue;
4070 }
4073 * This selects the next l2arc device to write to, and in
4074 * doing so the next spa to feed from: dev->l2ad_spa.
4076 if ((dev = l2arc_dev_get_next()) == NULL) {
4077 mutex_exit(&l2arc_dev_mtx);
4078 continue;
4079 }
4080 spa = dev->l2ad_spa;
4081 ASSERT(spa != NULL);
4082 ARCSTAT_BUMP(arcstat_l2_feeds);
4085 * Evict L2ARC buffers that will be overwritten.
4087 l2arc_evict(dev, dev->l2ad_write, B_FALSE);
4090 * Write ARC buffers.
4092 l2arc_write_buffers(spa, dev);
4093 mutex_exit(&l2arc_dev_mtx);
4096 l2arc_thread_exit = 0;
4097 cv_broadcast(&l2arc_feed_thr_cv);
4098 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
4103 * Add a vdev for use by the L2ARC. By this point the spa has already
4104 * validated the vdev and opened it.
4107 l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
4109 l2arc_dev_t *adddev;
4112 * Create a new l2arc device entry.
4114 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4115 adddev->l2ad_spa = spa;
4116 adddev->l2ad_vdev = vd;
4117 adddev->l2ad_write = l2arc_write_max;
4118 adddev->l2ad_start = start;
4119 adddev->l2ad_end = end;
4120 adddev->l2ad_hand = adddev->l2ad_start;
4121 adddev->l2ad_evict = adddev->l2ad_start;
4122 adddev->l2ad_first = B_TRUE;
4123 ASSERT3U(adddev->l2ad_write, >, 0);
4126 * This is a list of all ARC buffers that are still valid on the
4129 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4130 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4131 offsetof(arc_buf_hdr_t, b_l2node));
4133 spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);
4136 * Add device to global list
4138 mutex_enter(&l2arc_dev_mtx);
4139 list_insert_head(l2arc_dev_list, adddev);
4140 atomic_inc_64(&l2arc_ndev);
4141 mutex_exit(&l2arc_dev_mtx);
4145 * Remove a vdev from the L2ARC.
4148 l2arc_remove_vdev(vdev_t *vd)
4150 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4153 * We can only grab the spa config lock when cache device writes
4156 ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done);
4159 * Find the device by vdev
4161 mutex_enter(&l2arc_dev_mtx);
4162 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4163 nextdev = list_next(l2arc_dev_list, dev);
4164 if (vd == dev->l2ad_vdev) {
4165 remdev = dev;
4166 break;
4167 }
4168 }
4169 ASSERT(remdev != NULL);
4172 * Remove device from global list
4174 list_remove(l2arc_dev_list, remdev);
4175 l2arc_dev_last = NULL; /* may have been invalidated */
4178 * Clear all buflists and ARC references. L2ARC device flush.
4180 l2arc_evict(remdev, 0, B_TRUE);
4181 list_destroy(remdev->l2ad_buflist);
4182 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4183 kmem_free(remdev, sizeof (l2arc_dev_t));
4185 atomic_dec_64(&l2arc_ndev);
4186 mutex_exit(&l2arc_dev_mtx);
4192 l2arc_thread_exit = 0;
4194 l2arc_writes_sent = 0;
4195 l2arc_writes_done = 0;
4197 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4198 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4199 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4200 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4201 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4203 l2arc_dev_list = &L2ARC_dev_list;
4204 l2arc_free_on_write = &L2ARC_free_on_write;
4205 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4206 offsetof(l2arc_dev_t, l2ad_node));
4207 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4208 offsetof(l2arc_data_free_t, l2df_list_node));
4210 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4211 TS_RUN, minclsyspri);
4217 mutex_enter(&l2arc_feed_thr_lock);
4218 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
4219 l2arc_thread_exit = 1;
4220 while (l2arc_thread_exit != 0)
4221 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4222 mutex_exit(&l2arc_feed_thr_lock);
4224 mutex_destroy(&l2arc_feed_thr_lock);
4225 cv_destroy(&l2arc_feed_thr_cv);
4226 mutex_destroy(&l2arc_dev_mtx);
4227 mutex_destroy(&l2arc_buflist_mtx);
4228 mutex_destroy(&l2arc_free_on_write_mtx);
4230 list_destroy(l2arc_dev_list);
4231 list_destroy(l2arc_free_on_write);