int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
+int zfs_arc_reduce_dnlc_percent = 0;
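+
+/*
+ * A value of zero for these tunables means "keep the compiled-in
+ * default"; the init code below only applies them when set > 0.
+ */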
/*
 * Note that buffers can be in one of 6 states:
 * ARC_anon, ARC_mru, ARC_mru_ghost, ARC_mfu, ARC_mfu_ghost, ARC_l2c_only.
 */
/*
* L2ARC Performance Tunables
*/
-uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
-uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
-uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
-uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
-uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
-boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
-boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
-boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
+unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
+unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
+unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
+unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
+unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
+int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
+int l2arc_feed_again = B_TRUE; /* turbo warmup */
+int l2arc_norw = B_TRUE; /* no reads during writes */
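+
+/*
+ * The types above switch from uint64_t/boolean_t to unsigned long/int,
+ * presumably to match the handlers module_param() actually provides
+ * (ulong and int; there is none for uint64_t or boolean_t). See the
+ * module_param() declarations at the end of this file.
+ */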
/*
 * L2ARC Internals
 */
kmem_cache_t *prev_data_cache = NULL;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
-
#ifdef _KERNEL
- if (arc_meta_used >= arc_meta_limit) {
+ int retry = 0;
+
+ while ((arc_meta_used >= arc_meta_limit) && (retry < 10)) {
/*
* We are exceeding our meta-data cache limit.
* Purge some DNLC entries to release holds on meta-data.
*/
dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
+ retry++;
}
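+	/*
+	 * The retry cap above bounds the purge loop: if releasing DNLC
+	 * holds cannot bring arc_meta_used under the limit, give up
+	 * after 10 passes rather than spin indefinitely.
+	 */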
#if defined(__i386)
arc_no_grow = FALSE;
}
+ /* Keep meta data usage within limits */
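+	/*
+	 * ARC_RECLAIM_CONS is the conservative strategy: it reaps the
+	 * kmem caches but, unlike ARC_RECLAIM_AGGR, does not also call
+	 * arc_shrink().
+	 */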
+ if (arc_meta_used >= arc_meta_limit)
+ arc_kmem_reap_now(ARC_RECLAIM_CONS);
+
arc_adjust();
if (arc_eviction_list != NULL)
 * direct reclaim will be triggered. In direct reclaim a more aggressive
 * strategy is used: data is evicted from the ARC and free slabs are reaped.
*/
-SPL_SHRINKER_CALLBACK_PROTO(arc_shrinker_func, cb, nr_to_scan, gfp_mask)
+static int
+__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
{
arc_reclaim_strategy_t strategy;
int arc_reclaim;
- /* Not allowed to perform filesystem reclaim */
- if (!(gfp_mask & __GFP_FS))
- return (-1);
-
/* Return number of reclaimable pages based on arc_shrink_shift */
- arc_reclaim = btop((arc_size - arc_c_min)) >> arc_shrink_shift;
- if (nr_to_scan == 0)
+ arc_reclaim = MAX(btop(((int64_t)arc_size - (int64_t)arc_c_min))
+ >> arc_shrink_shift, 0);
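+	/* nr_to_scan == 0 means the kernel is only querying the cache size */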
+ if (sc->nr_to_scan == 0)
return (arc_reclaim);
+ /* Prevent reclaim below arc_c_min */
+ if (arc_reclaim <= 0)
+ return (-1);
+
+ /* Not allowed to perform filesystem reclaim */
+ if (!(sc->gfp_mask & __GFP_FS))
+ return (-1);
+
/* Reclaim in progress */
if (mutex_tryenter(&arc_reclaim_thr_lock) == 0)
return (-1);
}
arc_kmem_reap_now(strategy);
- arc_reclaim = btop((arc_size - arc_c_min)) >> arc_shrink_shift;
+ arc_reclaim = MAX(btop(((int64_t)arc_size - (int64_t)arc_c_min))
+ >> arc_shrink_shift, 0);
mutex_exit(&arc_reclaim_thr_lock);
return (arc_reclaim);
}
+SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
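+
+/*
+ * Sketch of the expected registration in arc_init()/arc_fini() (not part
+ * of this excerpt; spl_register_shrinker()/spl_unregister_shrinker() are
+ * assumed to be the SPL compat wrappers):
+ *
+ *	spl_register_shrinker(&arc_shrinker);
+ *	...
+ *	spl_unregister_shrinker(&arc_shrinker);
+ */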
#endif /* _KERNEL */
if (zfs_arc_p_min_shift > 0)
arc_p_min_shift = zfs_arc_p_min_shift;
+ if (zfs_arc_reduce_dnlc_percent > 0)
+ arc_reduce_dnlc_percent = zfs_arc_reduce_dnlc_percent;
+
	/* if kmem_flags are set, let's try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
EXPORT_SYMBOL(arc_buf_remove_ref);
EXPORT_SYMBOL(arc_getbuf_func);
-module_param(zfs_arc_min, ulong, 0644);
-MODULE_PARM_DESC(zfs_arc_min, "Minimum arc size");
+module_param(zfs_arc_min, ulong, 0444);
+MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
-module_param(zfs_arc_max, ulong, 0644);
-MODULE_PARM_DESC(zfs_arc_max, "Maximum arc size");
+module_param(zfs_arc_max, ulong, 0444);
+MODULE_PARM_DESC(zfs_arc_max, "Max arc size");
-module_param(zfs_arc_meta_limit, ulong, 0644);
+module_param(zfs_arc_meta_limit, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");
+
+module_param(zfs_arc_reduce_dnlc_percent, int, 0444);
+MODULE_PARM_DESC(zfs_arc_reduce_dnlc_percent, "Meta reclaim percentage");
+
+module_param(zfs_arc_grow_retry, int, 0444);
+MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size");
+
+module_param(zfs_arc_shrink_shift, int, 0444);
+MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");
+
+module_param(zfs_arc_p_min_shift, int, 0444);
+MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");
+
+module_param(l2arc_write_max, ulong, 0444);
+MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval");
+
+module_param(l2arc_write_boost, ulong, 0444);
+MODULE_PARM_DESC(l2arc_write_boost, "Extra write bytes during device warmup");
+
+module_param(l2arc_headroom, ulong, 0444);
+MODULE_PARM_DESC(l2arc_headroom, "Number of max device writes to precache");
+
+module_param(l2arc_feed_secs, ulong, 0444);
+MODULE_PARM_DESC(l2arc_feed_secs, "Seconds between L2ARC writing");
+
+module_param(l2arc_feed_min_ms, ulong, 0444);
+MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds");
+
+module_param(l2arc_noprefetch, int, 0444);
+MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers");
+
+module_param(l2arc_feed_again, int, 0444);
+MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup");
+
+module_param(l2arc_norw, int, 0444);
+MODULE_PARM_DESC(l2arc_norw, "No reads during writes");
+
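+/*
+ * With 0444 permissions these parameters are read-only through
+ * /sys/module/zfs/parameters/ and may only be set at module load
+ * time, e.g.:
+ *
+ *	modprobe zfs zfs_arc_max=1073741824 l2arc_noprefetch=0
+ */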
#endif