/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */
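
/*
 * Each txg moves through three states: open, in which new transactions
 * may be assigned via txg_hold_open(); quiescing, in which no new
 * transactions are admitted while existing holds drain; and syncing, in
 * which the txg's dirty data is written out by spa_sync().  Per-txg
 * state lives in TXG_SIZE slots indexed by (txg & TXG_MASK), and at any
 * instant at most one txg is in each state.
 */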

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
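
/*
 * Note the layout above: each CPU gets its own tx_cpu_t with its own
 * lock, per-txg hold counts, and callback lists, so concurrent callers
 * of txg_hold_open() seldom contend with one another; the quiesce path
 * pays for this by having to visit all max_ncpus entries.
 */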

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);	/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_interruptible(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);

	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is used only as a "random" index into the
	 * tx_cpu array; there is no other significance to the chosen
	 * tx_cpu, so the current cpu is as good a choice as any.
	 */
	kpreempt_disable();
	tc = &tx->tx_cpu[CPU_SEQID];
	kpreempt_enable();

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
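
/*
 * A sketch of the hold/release protocol as the DMU drives it (see
 * dmu_tx_assign() and dmu_tx_commit() for the real callers):
 *
 *	txg_handle_t th;
 *	uint64_t txg;
 *
 *	txg = txg_hold_open(dp, &th);	returns with tc_lock held
 *	txg_rele_to_quiesce(&th);	drops tc_lock; the hold remains
 *	... dirty in-core state for this txg ...
 *	txg_rele_to_sync(&th);		drops the hold; the txg may now
 *					finish quiescing
 */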

static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}
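
/*
 * Holding every tc_lock while tx_open_txg is bumped above is what makes
 * the txg boundary exact: txg_hold_open() samples tx_open_txg under its
 * tc_lock, so once the locks are dropped no new hold can reference the
 * old txg and its per-cpu counts can only drain to zero.
 */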

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */
		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, minclsyspri, max_ncpus, INT_MAX,
			    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

/*
 * Wait for pending commit callbacks of already-synced transactions to
 * finish processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait(tx->tx_commit_cb_taskq);
}
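
/*
 * Note that txg_sync_thread() broadcasts tx_sync_done_cv before it
 * dispatches callbacks, so a thread returning from txg_wait_synced()
 * must not assume its commit callbacks have already run; callers who
 * need that guarantee should use txg_wait_callbacks().
 */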

static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

#ifdef _KERNEL
	/*
	 * Disable the normal reclaim path for the txg_sync thread.  This
	 * ensures the thread will never enter dmu_tx_assign() which can
	 * otherwise occur due to direct reclaim.  If this is allowed to
	 * happen the system can deadlock.  Direct reclaim call path:
	 * ->shrink_icache_memory->prune_icache->dispose_list->
	 * clear_inode->zpl_clear_inode->zfs_inactive->dmu_tx_assign
	 */
	current->flags |= PF_MEMALLOC;
#endif /* _KERNEL */

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
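
/*
 * The two threads above form a pipeline: while the sync thread writes
 * out txg N, the quiesce thread may quiesce txg N+1, and txg N+2 stays
 * open to accept new assignments.
 */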

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
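
/*
 * txg_delay() gives writers a throttle short of blocking outright in
 * txg_wait_open(); the write-limit logic (e.g. in
 * dsl_pool_tempreserve_space()) uses it to slow transaction assignment
 * by a tick or so when the open txg is filling up faster than the sync
 * thread can retire its predecessors.
 */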

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
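
/*
 * Objects join these lists intrusively: the object embeds a txg_node_t
 * and the list is created with that field's offset.  A hypothetical
 * user looks like this:
 *
 *	typedef struct foo {
 *		...
 *		txg_node_t	foo_node;
 *	} foo_t;
 *
 *	txg_list_create(&fl, offsetof(foo_t, foo_node));
 *	(void) txg_list_add(&fl, foo, txg);
 *
 * Because txg_node_t carries TXG_SIZE next/member slots, one object may
 * be on the lists of several txgs at once.
 */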

void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
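
/*
 * Typical iteration, subject to the caveat above (the list must not be
 * changing):
 *
 *	for (p = txg_list_head(tl, txg); p != NULL;
 *	    p = txg_list_next(tl, p, txg))
 *		...
 */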

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);
#endif