CALLB_CPR_SAFE_BEGIN(cpr);
if (time)
- (void) cv_timedwait(cv, &tx->tx_sync_lock,
+ (void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
ddi_get_lbolt() + time);
else
- cv_wait(cv, &tx->tx_sync_lock);
+ cv_wait_interruptible(cv, &tx->tx_sync_lock);
CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
callb_cpr_t cpr;
uint64_t start, delta;
+#ifdef _KERNEL
+ /*
+ * Disable the normal reclaim path for the txg_sync thread. This
+ * ensures the thread will never enter dmu_tx_assign() which can
+ * otherwise occur due to direct reclaim. If this is allowed to
+ * happen the system can deadlock. Direct reclaim call path:
+ *
+ * ->shrink_icache_memory->prune_icache->dispose_list->
+ * clear_inode->zpl_clear_inode->zfs_inactive->dmu_tx_assign
+ */
+ current->flags |= PF_MEMALLOC;
+#endif /* _KERNEL */
+
txg_thread_enter(tx, &cpr);
start = delta = 0;
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
tx_state_t *tx = &dp->dp_tx;
- int timeout = ddi_get_lbolt() + ticks;
+ clock_t timeout = ddi_get_lbolt() + ticks;
	/* don't delay if this txg could transition to quiescing immediately */
if (tx->tx_open_txg > txg ||