The non-blocking allocation handlers in nvlist_alloc() would be
mistakenly assigned if any flags other than KM_SLEEP were passed.
This meant that nvlists allocated with KM_PUSHPAGE or other KM_*
debug flags were effectively always using atomic allocations.
While these failures were unlikely, they could lead to assertions,
because KM_PUSHPAGE allocations in particular are guaranteed to
succeed or block — they must never fail.
Since the existing API does not allow us to pass allocation
flags to the private allocators the cleanest thing to do is to
add a KM_PUSHPAGE allocator.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes zfsonlinux/spl#249
/*
 * Kernel-only allocator handles.  nv_alloc_pushpage backs KM_PUSHPAGE
 * nvlist allocations, which must never fail (they may only block), so
 * they cannot share the atomic/nosleep allocator.
 */
#if defined(_KERNEL) && !defined(_BOOT)
extern nv_alloc_t *nv_alloc_sleep;
extern nv_alloc_t *nv_alloc_pushpage;
#endif

int nv_alloc_init(nv_alloc_t *, const nv_alloc_ops_t *, /* args */ ...);
/*
 * Allocate an nvlist, selecting the allocator that matches the caller's
 * KM_* flag.  Previously anything other than KM_SLEEP silently fell
 * through to the atomic (nosleep) allocator, so KM_PUSHPAGE requests —
 * which are guaranteed to succeed or block — could spuriously fail.
 *
 * Returns 0 on success, EINVAL for an unrecognized kmflag, or the
 * error from nvlist_xalloc().
 */
int
nvlist_alloc(nvlist_t **nvlp, uint_t nvflag, int kmflag)
{
	nv_alloc_t *nva = nv_alloc_nosleep;

#if defined(_KERNEL) && !defined(_BOOT)
	switch (kmflag) {
	case KM_SLEEP:
		nva = nv_alloc_sleep;
		break;
	case KM_PUSHPAGE:
		nva = nv_alloc_pushpage;
		break;
	case KM_NOSLEEP:
		nva = nv_alloc_nosleep;
		break;
	default:
		/* Reject unknown flags rather than guessing an allocator. */
		return (EINVAL);
	}
#endif

	return (nvlist_xalloc(nvlp, nvflag, nva));
}
+nv_alloc_pushpage_spl(nv_alloc_t *nva, size_t size)
+{
+ return (kmem_alloc(size, KM_PUSHPAGE | KM_NODEBUG));
+}
+
+static void *
nv_alloc_nosleep_spl(nv_alloc_t *nva, size_t size)
{
return (kmem_alloc(size, KM_NOSLEEP));
nv_alloc_nosleep_spl(nv_alloc_t *nva, size_t size)
{
return (kmem_alloc(size, KM_NOSLEEP));
NULL /* nv_ao_reset() */
};
NULL /* nv_ao_reset() */
};
+const nv_alloc_ops_t spl_pushpage_ops_def = {
+ NULL, /* nv_ao_init() */
+ NULL, /* nv_ao_fini() */
+ nv_alloc_pushpage_spl, /* nv_ao_alloc() */
+ nv_free_spl, /* nv_ao_free() */
+ NULL /* nv_ao_reset() */
+};
+
const nv_alloc_ops_t spl_nosleep_ops_def = {
NULL, /* nv_ao_init() */
NULL, /* nv_ao_fini() */
const nv_alloc_ops_t spl_nosleep_ops_def = {
NULL, /* nv_ao_init() */
NULL, /* nv_ao_fini() */
+nv_alloc_t nv_alloc_pushpage_def = {
+ &spl_pushpage_ops_def,
+ NULL
+};
+
nv_alloc_t nv_alloc_nosleep_def = {
&spl_nosleep_ops_def,
NULL
};
nv_alloc_t *nv_alloc_sleep = &nv_alloc_sleep_def;
nv_alloc_t nv_alloc_nosleep_def = {
&spl_nosleep_ops_def,
NULL
};
nv_alloc_t *nv_alloc_sleep = &nv_alloc_sleep_def;
+nv_alloc_t *nv_alloc_pushpage = &nv_alloc_pushpage_def;
nv_alloc_t *nv_alloc_nosleep = &nv_alloc_nosleep_def;
nv_alloc_t *nv_alloc_nosleep = &nv_alloc_nosleep_def;