X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Ftxg.c;h=6e64adf9376e5c97a8cfb823caf796c0c9cb6b8f;hb=570827e129ed81e066e894530bbe24642f473154;hp=f9f24dd0a9459a5f8de66f4375d526841d978546;hpb=090ff0929ec7a2e2e65efaaddb9981d15964f7d9;p=zfs.git

diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index f9f24dd..6e64adf 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -49,7 +49,7 @@ txg_init(dsl_pool_t *dp, uint64_t txg)
 	int c;
 	bzero(tx, sizeof (tx_state_t));
 
-	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
+	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
 
 	for (c = 0; c < max_ncpus; c++) {
 		int i;
@@ -107,7 +107,7 @@ txg_fini(dsl_pool_t *dp)
 	if (tx->tx_commit_cb_taskq != NULL)
 		taskq_destroy(tx->tx_commit_cb_taskq);
 
-	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
+	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
 
 	bzero(tx, sizeof (tx_state_t));
 }
@@ -166,10 +166,10 @@ txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
 	CALLB_CPR_SAFE_BEGIN(cpr);
 
 	if (time)
-		(void) cv_timedwait(cv, &tx->tx_sync_lock,
+		(void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
 		    ddi_get_lbolt() + time);
 	else
-		cv_wait(cv, &tx->tx_sync_lock);
+		cv_wait_interruptible(cv, &tx->tx_sync_lock);
 
 	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
 }
@@ -350,6 +350,20 @@ txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
 	}
 }
 
+/*
+ * Wait for pending commit callbacks of already-synced transactions to finish
+ * processing.
+ * Calling this function from within a commit callback will deadlock.
+ */
+void
+txg_wait_callbacks(dsl_pool_t *dp)
+{
+	tx_state_t *tx = &dp->dp_tx;
+
+	if (tx->tx_commit_cb_taskq != NULL)
+		taskq_wait(tx->tx_commit_cb_taskq);
+}
+
 static void
 txg_sync_thread(dsl_pool_t *dp)
 {
@@ -358,6 +372,19 @@ txg_sync_thread(dsl_pool_t *dp)
 	callb_cpr_t cpr;
 	uint64_t start, delta;
 
+#ifdef _KERNEL
+	/*
+	 * Disable the normal reclaim path for the txg_sync thread. This
+	 * ensures the thread will never enter dmu_tx_assign() which can
+	 * otherwise occur due to direct reclaim. If this is allowed to
+	 * happen the system can deadlock. Direct reclaim call path:
+	 *
+	 *   ->shrink_icache_memory->prune_icache->dispose_list->
+	 *   clear_inode->zpl_clear_inode->zfs_inactive->dmu_tx_assign
+	 */
+	current->flags |= PF_MEMALLOC;
+#endif /* _KERNEL */
+
 	txg_thread_enter(tx, &cpr);
 
 	start = delta = 0;
@@ -479,7 +506,7 @@ void
 txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
 {
 	tx_state_t *tx = &dp->dp_tx;
-	int timeout = ddi_get_lbolt() + ticks;
+	clock_t timeout = ddi_get_lbolt() + ticks;
 
 	/* don't delay if this txg could transition to quiesing immediately */
 	if (tx->tx_open_txg > txg ||
@@ -497,6 +524,8 @@ txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
 		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
 		    timeout);
 
+	DMU_TX_STAT_BUMP(dmu_tx_delay);
+
 	mutex_exit(&tx->tx_sync_lock);
 }
 
@@ -722,3 +751,20 @@ txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
 
 	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(txg_init);
+EXPORT_SYMBOL(txg_fini);
+EXPORT_SYMBOL(txg_sync_start);
+EXPORT_SYMBOL(txg_sync_stop);
+EXPORT_SYMBOL(txg_hold_open);
+EXPORT_SYMBOL(txg_rele_to_quiesce);
+EXPORT_SYMBOL(txg_rele_to_sync);
+EXPORT_SYMBOL(txg_register_callbacks);
+EXPORT_SYMBOL(txg_delay);
+EXPORT_SYMBOL(txg_wait_synced);
+EXPORT_SYMBOL(txg_wait_open);
+EXPORT_SYMBOL(txg_wait_callbacks);
+EXPORT_SYMBOL(txg_stalled);
+EXPORT_SYMBOL(txg_sync_waiting);
+#endif
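
Usage sketch (illustrative only, not part of the patch): one way a consumer might pair the newly exported txg_wait_callbacks() with the existing dmu_tx_callback_register() interface. The example_cb(), example_commit(), and example_teardown() names and the boolean_t plumbing around them are hypothetical; dmu_tx_create(), dmu_tx_assign(), dmu_tx_callback_register(), dmu_tx_commit(), and dmu_objset_pool() are existing ZFS interfaces.

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_pool.h>
#include <sys/txg.h>

/* Commit callback: runs from tx_commit_cb_taskq after the txg has synced. */
static void
example_cb(void *arg, int error)
{
	boolean_t *done = arg;

	if (error == 0)
		*done = B_TRUE;
}

/* Open a transaction, register the commit callback, and commit it. */
static int
example_commit(objset_t *os, boolean_t *done)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int error;

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	dmu_tx_callback_register(tx, example_cb, done);
	dmu_tx_commit(tx);

	return (0);
}

/*
 * Teardown path: drain every outstanding commit callback.  Per the comment
 * added in this patch, this must never be called from inside a commit
 * callback, because it waits on the very taskq the callback runs from.
 */
static void
example_teardown(objset_t *os)
{
	txg_wait_callbacks(dmu_objset_pool(os));
}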