X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fzio.c;h=70d3addf5cadcbfded19a17c0586d986f0fab342;hb=5e6121455c0b941f4612ceb2c1f312d527534b46;hp=fb60bd8a06a9f07a53be8182eb78857a9d7080c9;hpb=c776b317e44a64d53217d34c3fa61d36fd5a32d5;p=zfs.git diff --git a/module/zfs/zio.c b/module/zfs/zio.c index fb60bd8..70d3add 100644 --- a/module/zfs/zio.c +++ b/module/zfs/zio.c @@ -74,6 +74,7 @@ kmem_cache_t *zio_cache; kmem_cache_t *zio_link_cache; kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; +int zio_bulk_flags = 0; #ifdef _KERNEL extern vmem_t *zio_alloc_arena; @@ -93,6 +94,8 @@ int zio_buf_debug_limit = 16384; int zio_buf_debug_limit = 0; #endif +static inline void __zio_execute(zio_t *zio); + void zio_init(void) { @@ -134,12 +137,14 @@ zio_init(void) (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); zio_buf_cache[c] = kmem_cache_create(name, size, align, NULL, NULL, NULL, NULL, NULL, - size > zio_buf_debug_limit ? KMC_NODEBUG : 0); + (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) | + zio_bulk_flags); (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); zio_data_buf_cache[c] = kmem_cache_create(name, size, align, NULL, NULL, NULL, NULL, data_alloc_arena, - size > zio_buf_debug_limit ? KMC_NODEBUG : 0); + (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) | + zio_bulk_flags); } } @@ -451,7 +456,7 @@ zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait) if (--*countp == 0 && pio->io_stall == countp) { pio->io_stall = NULL; mutex_exit(&pio->io_lock); - zio_execute(pio); + __zio_execute(pio); } else { mutex_exit(&pio->io_lock); } @@ -1122,9 +1127,24 @@ zio_interrupt(zio_t *zio) */ static zio_pipe_stage_t *zio_pipeline[]; +/* + * zio_execute() is a wrapper around the static function + * __zio_execute() so that we can force __zio_execute() to be + * inlined. This reduces stack overhead which is important + * because __zio_execute() is called recursively in several zio + * code paths. zio_execute() itself cannot be inlined because + * it is externally visible. + */ void zio_execute(zio_t *zio) { + __zio_execute(zio); +} + +__attribute__((always_inline)) +static inline void +__zio_execute(zio_t *zio) +{ zio->io_executor = curthread; while (zio->io_stage < ZIO_STAGE_DONE) { @@ -1169,6 +1189,7 @@ zio_execute(zio_t *zio) } } + /* * ========================================================================== * Initiate I/O, either sync or async @@ -1184,7 +1205,7 @@ zio_wait(zio_t *zio) zio->io_waiter = curthread; - zio_execute(zio); + __zio_execute(zio); mutex_enter(&zio->io_lock); while (zio->io_executor != NULL) @@ -1214,7 +1235,7 @@ zio_nowait(zio_t *zio) zio_add_child(spa->spa_async_zio_root, zio); } - zio_execute(zio); + __zio_execute(zio); } /* @@ -1269,7 +1290,7 @@ zio_reexecute(zio_t *pio) * responsibility of the caller to wait on him. 
*/ if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) - zio_execute(pio); + __zio_execute(pio); } void @@ -2957,3 +2978,19 @@ static zio_pipe_stage_t *zio_pipeline[] = { zio_checksum_verify, zio_done }; + +#if defined(_KERNEL) && defined(HAVE_SPL) +/* Fault injection */ +EXPORT_SYMBOL(zio_injection_enabled); +EXPORT_SYMBOL(zio_inject_fault); +EXPORT_SYMBOL(zio_inject_list_next); +EXPORT_SYMBOL(zio_clear_fault); +EXPORT_SYMBOL(zio_handle_fault_injection); +EXPORT_SYMBOL(zio_handle_device_injection); +EXPORT_SYMBOL(zio_handle_label_injection); +EXPORT_SYMBOL(zio_priority_table); +EXPORT_SYMBOL(zio_type_name); + +module_param(zio_bulk_flags, int, 0644); +MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers"); +#endif
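
Note on the zio_execute() hunk above: the patch keeps zio_execute() as the externally visible entry point (as its new comment says, it cannot be inlined because other code refers to it by symbol) and moves the pipeline loop into __zio_execute(), a static function forced inline so that the recursive invocations inside the pipeline do not each add a full call frame to the kernel stack. The sketch below is a minimal, self-contained illustration of that wrapper-around-always_inline pattern; work_t, work_execute() and __work_execute() are hypothetical stand-ins for the zio equivalents, not ZFS code.

/*
 * Sketch only: work_t, work_execute() and __work_execute() are
 * hypothetical stand-ins for zio_t, zio_execute() and __zio_execute().
 */
#include <stdio.h>

typedef struct work {
	int	w_stage;		/* current pipeline stage */
	int	w_last_stage;		/* stage at which we stop */
} work_t;

/* Forward declaration, as the patch adds for __zio_execute(). */
static inline void __work_execute(work_t *w);

/*
 * Externally visible wrapper.  Callers outside this file (or callers
 * that take its address) use this symbol, so it cannot be inlined away.
 */
void
work_execute(work_t *w)
{
	__work_execute(w);
}

/*
 * The real worker: static and forced inline, so calls from within this
 * file are flattened into their callers and cost no extra stack frame.
 */
__attribute__((always_inline))
static inline void
__work_execute(work_t *w)
{
	while (w->w_stage < w->w_last_stage)
		w->w_stage++;	/* a real stage might re-enter the pipeline */
}

int
main(void)
{
	work_t w = { .w_stage = 0, .w_last_stage = 3 };

	work_execute(&w);
	printf("stopped at stage %d\n", w.w_stage);
	return (0);
}

The forward declaration added near the top of zio.c plays the same role as the one in the sketch: it lets earlier functions such as zio_notify_parent() call the inline variant before its definition appears.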
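
The zio_init() hunks OR a new tunable, zio_bulk_flags, into the flags handed to kmem_cache_create() for every zio_buf/zio_data_buf cache, on top of the existing size-based KMC_NODEBUG choice, and the module_param() line at the end makes the tunable settable when the module is loaded; since the caches are created once in zio_init(), that is the only point at which the value is consulted. Below is a rough user-space sketch of just the flag-combination expression, assuming hypothetical names (CACHE_NODEBUG, bulk_flags, cache_flags()) in place of KMC_NODEBUG, zio_bulk_flags and the open-coded expression; it is not the kernel code.

/* Sketch only: hypothetical stand-ins for KMC_NODEBUG and zio_bulk_flags. */
#include <stdio.h>
#include <stddef.h>

#define CACHE_NODEBUG		0x1	/* skip allocator debugging for this cache */

static int bulk_flags = 0;		/* set by the administrator, default 0 */
static size_t buf_debug_limit = 16384;	/* debug only caches at or below this size */

static int
cache_flags(size_t size)
{
	/*
	 * Mirrors the patched expression in zio_init(): large buffers
	 * opt out of debugging, and any extra bulk flags are OR'd in.
	 */
	return ((size > buf_debug_limit ? CACHE_NODEBUG : 0) | bulk_flags);
}

int
main(void)
{
	bulk_flags = 0x4;		/* e.g. supplied as a module parameter */

	printf("  4K cache flags: 0x%x\n", cache_flags(4096));
	printf("128K cache flags: 0x%x\n", cache_flags(131072));
	return (0);
}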