Add ZIL statistics.
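
This change introduces a set of named kstats for the ZIL: counters for zil_commit() calls and writer passes, itx counts and byte totals broken out by write state (copied, need-copy, indirect), and lwb block allocations split between slog and main-pool metaslabs. The kstat is registered in zil_init() and removed in zil_fini(). To allow export as Linux module parameters, zfs_nocacheflush becomes an int and zil_slog_limit an unsigned long; both are exposed alongside zil_replay_disable. In addition, the final lwb is now freed in zil_close() rather than zil_free(), itx allocation uses KM_PUSHPAGE | KM_NODEBUG, and the replay buffer moves to vmem_alloc().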
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 3c18d43..9ab02d7 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -20,6 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */
 
 /* Portions Copyright 2010 Robert Milkowski */
  */
 
 /*
+ * See zil.h for more information about these fields.
+ */
+zil_stats_t zil_stats = {
+       { "zil_commit_count",              KSTAT_DATA_UINT64 },
+       { "zil_commit_writer_count",       KSTAT_DATA_UINT64 },
+       { "zil_itx_count",                 KSTAT_DATA_UINT64 },
+       { "zil_itx_indirect_count",        KSTAT_DATA_UINT64 },
+       { "zil_itx_indirect_bytes",        KSTAT_DATA_UINT64 },
+       { "zil_itx_copied_count",          KSTAT_DATA_UINT64 },
+       { "zil_itx_copied_bytes",          KSTAT_DATA_UINT64 },
+       { "zil_itx_needcopy_count",        KSTAT_DATA_UINT64 },
+       { "zil_itx_needcopy_bytes",        KSTAT_DATA_UINT64 },
+       { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
+       { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
+       { "zil_itx_metaslab_slog_count",   KSTAT_DATA_UINT64 },
+       { "zil_itx_metaslab_slog_bytes",   KSTAT_DATA_UINT64 },
+};
+
+static kstat_t *zil_ksp;
+
+/*
  * This global ZIL switch affects all pools
  */
 int zil_replay_disable = 0;    /* disable intent logging replay */
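
The ZIL_STAT_BUMP() and ZIL_STAT_INCR() macros used throughout this patch come from zil.h, which is not part of this file's diff. A minimal sketch of what they presumably expand to, assuming each named counter is updated atomically in place:

/* Hypothetical zil.h companions to the zil_stats structure above. */
#define	ZIL_STAT_INCR(stat, val) \
	atomic_add_64(&zil_stats.stat.value.ui64, (val))
#define	ZIL_STAT_BUMP(stat) \
	ZIL_STAT_INCR(stat, 1)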
@@ -74,7 +96,7 @@ int zil_replay_disable = 0;    /* disable intent logging replay */
  * zfs_nocacheflush will cause corruption on power loss if a volatile
  * out-of-order write cache is enabled.
  */
-boolean_t zfs_nocacheflush = B_FALSE;
+int zfs_nocacheflush = 0;
 
 static kmem_cache_t *zil_lwb_cache;
 
@@ -562,7 +584,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
 
        if (!list_is_empty(&zilog->zl_lwb_list)) {
                ASSERT(zh->zh_claim_txg == 0);
-               ASSERT(!keep_first);
+               VERIFY(!keep_first);
                while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
                        list_remove(&zilog->zl_lwb_list, lwb);
                        if (lwb->lwb_buf != NULL)
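
Promoting this check from ASSERT() to VERIFY() keeps it active in production builds: ASSERT() compiles away when debugging is disabled, while VERIFY() always evaluates its condition and panics on failure.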
@@ -858,7 +880,7 @@ uint64_t zil_block_buckets[] = {
  * is less than the limit or the total list size is less than 2X the limit.
  * Limit checking is disabled by setting zil_slog_limit to UINT64_MAX.
  */
-uint64_t zil_slog_limit = 1024 * 1024;
+unsigned long zil_slog_limit = 1024 * 1024;
 #define        USE_SLOG(zilog) (((zilog)->zl_logbias == ZFS_LOGBIAS_LATENCY) && \
        (((zilog)->zl_cur_used < zil_slog_limit) || \
        ((zilog)->zl_itx_list_sz < (zil_slog_limit << 1))))
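
As a worked example with the default 1 MiB limit and logbias=latency: a commit stream with zl_cur_used at 768 KiB always selects the slog, regardless of queue depth; once zl_cur_used reaches 1 MiB and zl_itx_list_sz reaches 2 MiB, USE_SLOG() evaluates to false and the block is allocated from a main-pool metaslab instead, which the new zil_itx_metaslab_normal_* kstats record.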
@@ -878,6 +900,7 @@ zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
        uint64_t txg;
        uint64_t zil_blksz, wsz;
        int i, error;
+       boolean_t use_slog;
 
        if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
                zilc = (zil_chain_t *)lwb->lwb_buf;
@@ -934,8 +957,19 @@ zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
 
        BP_ZERO(bp);
        /* pass the old blkptr in order to spread log blocks across devs */
+       use_slog = USE_SLOG(zilog);
        error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz,
-           USE_SLOG(zilog));
+           use_slog);
+       if (use_slog)
+       {
+               ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
+               ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
+       }
+       else
+       {
+               ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
+               ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
+       }
        if (!error) {
                ASSERT3U(bp->blk_birth, ==, txg);
                bp->blk_cksum = lwb->lwb_blk.blk_cksum;
@@ -1021,13 +1055,18 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
        lrc = (lr_t *)lr_buf;
        lrw = (lr_write_t *)lrc;
 
+       ZIL_STAT_BUMP(zil_itx_count);
+
        /*
         * If it's a write, fetch the data or get its blkptr as appropriate.
         */
        if (lrc->lrc_txtype == TX_WRITE) {
                if (txg > spa_freeze_txg(zilog->zl_spa))
                        txg_wait_synced(zilog->zl_dmu_pool, txg);
-               if (itx->itx_wr_state != WR_COPIED) {
+               if (itx->itx_wr_state == WR_COPIED) {
+                       ZIL_STAT_BUMP(zil_itx_copied_count);
+                       ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
+               } else {
                        char *dbuf;
                        int error;
 
@@ -1035,9 +1074,13 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
                                ASSERT(itx->itx_wr_state == WR_NEED_COPY);
                                dbuf = lr_buf + reclen;
                                lrw->lr_common.lrc_reclen += dlen;
+                               ZIL_STAT_BUMP(zil_itx_needcopy_count);
+                               ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length);
                        } else {
                                ASSERT(itx->itx_wr_state == WR_INDIRECT);
                                dbuf = NULL;
+                               ZIL_STAT_BUMP(zil_itx_indirect_count);
+                               ZIL_STAT_INCR(zil_itx_indirect_bytes, lrw->lr_length);
                        }
                        error = zilog->zl_get_data(
                            itx->itx_private, lrw, dbuf, lwb->lwb_zio);
@@ -1075,7 +1118,8 @@ zil_itx_create(uint64_t txtype, size_t lrsize)
 
        lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
 
-       itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
+       itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize,
+           KM_PUSHPAGE | KM_NODEBUG);
        itx->itx_lr.lrc_txtype = txtype;
        itx->itx_lr.lrc_reclen = lrsize;
        itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
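
The switch away from KM_SLEEP is presumably motivated by the Linux port: itx records are allocated in the synchronous write path, where a KM_SLEEP allocation entering direct reclaim could deadlock against writeback into the same pool, so the SPL's KM_PUSHPAGE is used instead; KM_NODEBUG suppresses the SPL's large-allocation warnings, since WR_COPIED records can embed the full write payload.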
@@ -1495,6 +1539,8 @@ zil_commit(zilog_t *zilog, uint64_t foid)
        if (zilog->zl_sync == ZFS_SYNC_DISABLED)
                return;
 
+       ZIL_STAT_BUMP(zil_commit_count);
+
        /* move the async itxs for the foid to the sync queues */
        zil_async_to_sync(zilog, foid);
 
@@ -1510,6 +1556,7 @@ zil_commit(zilog_t *zilog, uint64_t foid)
 
        zilog->zl_next_batch++;
        zilog->zl_writer = B_TRUE;
+       ZIL_STAT_BUMP(zil_commit_writer_count);
        zil_commit_writer(zilog);
        zilog->zl_com_batch = mybatch;
        zilog->zl_writer = B_FALSE;
@@ -1598,12 +1645,26 @@ zil_init(void)
 {
        zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
            sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
+
+       zil_ksp = kstat_create("zfs", 0, "zil", "misc",
+           KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
+           KSTAT_FLAG_VIRTUAL);
+
+       if (zil_ksp != NULL) {
+               zil_ksp->ks_data = &zil_stats;
+               kstat_install(zil_ksp);
+       }
 }
 
 void
 zil_fini(void)
 {
        kmem_cache_destroy(zil_lwb_cache);
+
+       if (zil_ksp != NULL) {
+               kstat_delete(zil_ksp);
+               zil_ksp = NULL;
+       }
 }
 
 void
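
Once installed, the kstat is published as module "zfs", instance 0, name "zil", class "misc"; under the SPL on Linux it typically surfaces as /proc/spl/kstat/zfs/zil, where each field of zil_stats appears by name alongside its current value.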
@@ -1664,21 +1725,11 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
 void
 zil_free(zilog_t *zilog)
 {
-       lwb_t *head_lwb;
        int i;
 
        zilog->zl_stop_sync = 1;
 
-       /*
-        * After zil_close() there should only be one lwb with a buffer.
-        */
-       head_lwb = list_head(&zilog->zl_lwb_list);
-       if (head_lwb) {
-               ASSERT(head_lwb == list_tail(&zilog->zl_lwb_list));
-               list_remove(&zilog->zl_lwb_list, head_lwb);
-               zio_buf_free(head_lwb->lwb_buf, head_lwb->lwb_sz);
-               kmem_cache_free(zil_lwb_cache, head_lwb);
-       }
+       ASSERT(list_is_empty(&zilog->zl_lwb_list));
        list_destroy(&zilog->zl_lwb_list);
 
        avl_destroy(&zilog->zl_vdev_tree);
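
The lwb teardown removed here is not lost: it reappears in zil_close() below, so by the time zil_free() runs the list must already be empty, which the new ASSERT enforces.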
@@ -1718,6 +1769,10 @@ zil_open(objset_t *os, zil_get_data_t *get_data)
 {
        zilog_t *zilog = dmu_objset_zil(os);
 
+       ASSERT(zilog->zl_clean_taskq == NULL);
+       ASSERT(zilog->zl_get_data == NULL);
+       ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
        zilog->zl_get_data = get_data;
        zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
            2, 2, TASKQ_PREPOPULATE);
@@ -1731,7 +1786,7 @@ zil_open(objset_t *os, zil_get_data_t *get_data)
 void
 zil_close(zilog_t *zilog)
 {
-       lwb_t *tail_lwb;
+       lwb_t *lwb;
        uint64_t txg = 0;
 
        zil_commit(zilog, 0); /* commit all itx */
@@ -1743,9 +1798,9 @@ zil_close(zilog_t *zilog)
         * destroy the zl_clean_taskq.
         */
        mutex_enter(&zilog->zl_lock);
-       tail_lwb = list_tail(&zilog->zl_lwb_list);
-       if (tail_lwb != NULL)
-               txg = tail_lwb->lwb_max_txg;
+       lwb = list_tail(&zilog->zl_lwb_list);
+       if (lwb != NULL)
+               txg = lwb->lwb_max_txg;
        mutex_exit(&zilog->zl_lock);
        if (txg)
                txg_wait_synced(zilog->zl_dmu_pool, txg);
@@ -1753,6 +1808,19 @@ zil_close(zilog_t *zilog)
        taskq_destroy(zilog->zl_clean_taskq);
        zilog->zl_clean_taskq = NULL;
        zilog->zl_get_data = NULL;
+
+       /*
+        * We should have only one LWB left on the list; remove it now.
+        */
+       mutex_enter(&zilog->zl_lock);
+       lwb = list_head(&zilog->zl_lwb_list);
+       if (lwb != NULL) {
+               ASSERT(lwb == list_tail(&zilog->zl_lwb_list));
+               list_remove(&zilog->zl_lwb_list, lwb);
+               zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+               kmem_cache_free(zil_lwb_cache, lwb);
+       }
+       mutex_exit(&zilog->zl_lock);
 }
 
 /*
@@ -1939,7 +2007,7 @@ zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
        zr.zr_replay = replay_func;
        zr.zr_arg = arg;
        zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
-       zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
+       zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
 
        /*
         * Wait for in-progress removes to sync before starting replay.
@@ -1951,7 +2019,7 @@ zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
        ASSERT(zilog->zl_replay_blks == 0);
        (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
            zh->zh_claim_txg);
-       kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
+       vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
 
        zil_destroy(zilog, B_FALSE);
        txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
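
The move to vmem_alloc() also reflects the Linux port: zr.zr_lr is 2 * SPA_MAXBLOCKSIZE (256 KiB with the 128 KiB maximum block size of the time), larger than the SPL's slab-backed kmem_alloc() is meant to handle, so a virtual allocation is used for the replay buffer.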
@@ -1994,3 +2062,14 @@ zil_vdev_offline(const char *osname, void *arg)
        dmu_objset_rele(os, FTAG);
        return (error);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zil_replay_disable, int, 0644);
+MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
+
+module_param(zfs_nocacheflush, int, 0644);
+MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
+
+module_param(zil_slog_limit, ulong, 0644);
+MODULE_PARM_DESC(zil_slog_limit, "Max commit bytes to separate log device");
+#endif
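
With these exports and the 0644 permissions, the tunables can be set at module load time (e.g. as options to modprobe zfs) or adjusted at runtime by root through /sys/module/zfs/parameters/, following standard Linux module-parameter behavior.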