CALLB_CPR_SAFE_BEGIN(cpr);
if (time)
- (void) cv_timedwait(cv, &tx->tx_sync_lock,
+ (void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
ddi_get_lbolt() + time);
else
- cv_wait(cv, &tx->tx_sync_lock);
+ cv_wait_interruptible(cv, &tx->tx_sync_lock);
CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
}
}
+/*
+ * Block until every commit callback registered on transactions that have
+ * already been synced has finished executing on the callback taskq.
+ * Calling this function from within a commit callback will deadlock.
+ */
+void
+txg_wait_callbacks(dsl_pool_t *dp)
+{
+	tx_state_t *tx = &dp->dp_tx;
+
+	if (tx->tx_commit_cb_taskq != NULL) /* NULL: no callbacks ever registered — TODO confirm lazy creation */
+		taskq_wait(tx->tx_commit_cb_taskq);
+}
+
static void
txg_sync_thread(dsl_pool_t *dp)
{
callb_cpr_t cpr;
uint64_t start, delta;
+#ifdef _KERNEL
+ /*
+ * Disable the normal reclaim path for the txg_sync thread. This
+ * ensures the thread will never enter dmu_tx_assign() which can
+ * otherwise occur due to direct reclaim. If this is allowed to
+ * happen the system can deadlock. Direct reclaim call path:
+ *
+ * ->shrink_icache_memory->prune_icache->dispose_list->
+ * clear_inode->zpl_clear_inode->zfs_inactive->dmu_tx_assign
+ */
+ current->flags |= PF_MEMALLOC;
+#endif /* _KERNEL */
+
txg_thread_enter(tx, &cpr);
start = delta = 0;
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
+EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);
#endif