typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
uint64_t arg1, uint64_t arg2);
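+/*
+ * Named kstats tracking how often transaction assignment succeeds,
+ * and why it is delayed or fails when it does not; with the SPL these
+ * are exposed to user space under /proc/spl/kstat/zfs/dmu_tx.
+ */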
+dmu_tx_stats_t dmu_tx_stats = {
+ { "dmu_tx_assigned", KSTAT_DATA_UINT64 },
+ { "dmu_tx_delay", KSTAT_DATA_UINT64 },
+ { "dmu_tx_error", KSTAT_DATA_UINT64 },
+ { "dmu_tx_suspended", KSTAT_DATA_UINT64 },
+ { "dmu_tx_group", KSTAT_DATA_UINT64 },
+ { "dmu_tx_how", KSTAT_DATA_UINT64 },
+ { "dmu_tx_memory_reserve", KSTAT_DATA_UINT64 },
+ { "dmu_tx_memory_reclaim", KSTAT_DATA_UINT64 },
+ { "dmu_tx_memory_inflight", KSTAT_DATA_UINT64 },
+ { "dmu_tx_dirty_throttle", KSTAT_DATA_UINT64 },
+ { "dmu_tx_write_limit", KSTAT_DATA_UINT64 },
+ { "dmu_tx_quota", KSTAT_DATA_UINT64 },
+};
+
+static kstat_t *dmu_tx_ksp;
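+
+/*
+ * The DMU_TX_STAT_BUMP() calls below assume a companion macro in
+ * sys/dmu_tx.h along these lines (a sketch, not shown in this diff):
+ *
+ *   #define DMU_TX_STAT_INCR(stat, val) \
+ *       atomic_add_64(&dmu_tx_stats.stat.value.ui64, (val))
+ *   #define DMU_TX_STAT_BUMP(stat) DMU_TX_STAT_INCR(stat, 1)
+ */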
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
uint64_t start, end, i;
int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
int err = 0;
+ int l;
if (len == 0)
return;
* we need to account for overwrites/unref.
*/
if (start <= dn->dn_maxblkid) {
- for (int l = 0; l < DN_MAX_LEVELS; l++)
+ for (l = 0; l < DN_MAX_LEVELS; l++)
history[l] = -1ULL;
}
while (start <= dn->dn_maxblkid) {
ASSERT3U(tx->tx_txg, ==, 0);
- if (tx->tx_err)
+ if (tx->tx_err) {
+ DMU_TX_STAT_BUMP(dmu_tx_error);
return (tx->tx_err);
+ }
if (spa_suspended(spa)) {
+ DMU_TX_STAT_BUMP(dmu_tx_suspended);
+
/*
* If the user has indicated a blocking failure mode
* then return ERESTART which will block in dmu_tx_wait().
if (dn->dn_assigned_txg == tx->tx_txg - 1) {
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = txh;
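+ /*
+ * The dnode is still assigned to the previous open txg; the
+ * caller must dmu_tx_wait() for it and then retry the assign.
+ */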
+ DMU_TX_STAT_BUMP(dmu_tx_group);
return (ERESTART);
}
if (dn->dn_assigned_txg == 0)
* NB: This check must be after we've held the dnodes, so that
* the dmu_tx_unassign() logic will work properly
*/
- if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
+ if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg) {
+ DMU_TX_STAT_BUMP(dmu_tx_how);
return (ERESTART);
+ }
/*
* If a snapshot has been taken since we made our estimates,
return (err);
}
+ DMU_TX_STAT_BUMP(dmu_tx_assigned);
+
return (0);
}
ASSERT(tx->tx_txg != 0);
- while (txh = list_head(&tx->tx_holds)) {
+ while ((txh = list_head(&tx->tx_holds))) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
ASSERT(tx->tx_txg == 0);
- while (txh = list_head(&tx->tx_holds)) {
+ while ((txh = list_head(&tx->tx_holds))) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
{
dmu_tx_callback_t *dcb;
- while (dcb = list_head(cb_list)) {
+ while ((dcb = list_head(cb_list))) {
list_remove(cb_list, dcb);
dcb->dcb_func(dcb->dcb_data, error);
kmem_free(dcb, sizeof (dmu_tx_callback_t));
DB_DNODE_EXIT(db);
}
}
+
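+/*
+ * Register the statistics with the kstat framework. KSTAT_FLAG_VIRTUAL
+ * indicates we supply our own statically allocated ks_data, so
+ * kstat_create() does not allocate (and kstat_delete() does not free)
+ * the counters.
+ */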
+void
+dmu_tx_init(void)
+{
+ dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
+ KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ if (dmu_tx_ksp != NULL) {
+ dmu_tx_ksp->ks_data = &dmu_tx_stats;
+ kstat_install(dmu_tx_ksp);
+ }
+}
+
+void
+dmu_tx_fini(void)
+{
+ if (dmu_tx_ksp != NULL) {
+ kstat_delete(dmu_tx_ksp);
+ dmu_tx_ksp = NULL;
+ }
+}
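+
+/*
+ * dmu_tx_init() and dmu_tx_fini() are intended to be called once at
+ * module load and unload time (e.g. from dmu_init() and dmu_fini());
+ * a failed kstat_create() is tolerated since dmu_tx_ksp is checked
+ * for NULL before use.
+ */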
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(dmu_tx_create);
+EXPORT_SYMBOL(dmu_tx_hold_write);
+EXPORT_SYMBOL(dmu_tx_hold_free);
+EXPORT_SYMBOL(dmu_tx_hold_zap);
+EXPORT_SYMBOL(dmu_tx_hold_bonus);
+EXPORT_SYMBOL(dmu_tx_abort);
+EXPORT_SYMBOL(dmu_tx_assign);
+EXPORT_SYMBOL(dmu_tx_wait);
+EXPORT_SYMBOL(dmu_tx_commit);
+EXPORT_SYMBOL(dmu_tx_get_txg);
+EXPORT_SYMBOL(dmu_tx_callback_register);
+EXPORT_SYMBOL(dmu_tx_do_callbacks);
+EXPORT_SYMBOL(dmu_tx_hold_spill);
+EXPORT_SYMBOL(dmu_tx_hold_sa_create);
+EXPORT_SYMBOL(dmu_tx_hold_sa);
+#endif