Illumos #3006
[zfs.git] module/zfs/dmu_traverse.c
index 023f90e..84407f1 100644
@@ -20,6 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -53,6 +54,7 @@ typedef struct traverse_data {
        uint64_t td_objset;
        blkptr_t *td_rootbp;
        uint64_t td_min_txg;
+       zbookmark_t *td_resume;
        int td_flags;
        prefetch_data_t *td_pfd;
        blkptr_cb_t *td_func;
@@ -128,6 +130,54 @@ traverse_zil(traverse_data_t *td, zil_header_t *zh)
        zil_free(zilog);
 }
 
+typedef enum resume_skip {
+       RESUME_SKIP_ALL,
+       RESUME_SKIP_NONE,
+       RESUME_SKIP_CHILDREN
+} resume_skip_t;
+
+/*
+ * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
+ * the block indicated by zb does not need to be visited at all. Returns
+ * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
+ * resume point. This indicates that this block should be visited but not its
+ * children (since they must have been visited in a previous traversal).
+ * Otherwise returns RESUME_SKIP_NONE.
+ */
+static resume_skip_t
+resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
+    const zbookmark_t *zb)
+{
+       if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
+               /*
+                * If we already visited this bp & everything below,
+                * don't bother doing it again.
+                */
+               if (zbookmark_is_before(dnp, zb, td->td_resume))
+                       return (RESUME_SKIP_ALL);
+
+               /*
+                * If we found the block we're trying to resume from, zero
+                * the bookmark out to indicate that we have resumed.
+                */
+               ASSERT3U(zb->zb_object, <=, td->td_resume->zb_object);
+               if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
+                       bzero(td->td_resume, sizeof (*zb));
+                       if (td->td_flags & TRAVERSE_POST)
+                               return (RESUME_SKIP_CHILDREN);
+               }
+       }
+       return (RESUME_SKIP_NONE);
+}
+
+static void
+traverse_pause(traverse_data_t *td, const zbookmark_t *zb)
+{
+       ASSERT(td->td_resume != NULL);
+       ASSERT0(zb->zb_level);
+       bcopy(zb, td->td_resume, sizeof (*td->td_resume));
+}
+
 static int
 traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
     arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
@@ -137,8 +187,20 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
        arc_buf_t *buf = NULL;
        prefetch_data_t *pd = td->td_pfd;
        boolean_t hard = td->td_flags & TRAVERSE_HARD;
+       boolean_t pause = B_FALSE;
 
-       if (bp->blk_birth == 0) {
+       switch (resume_skip_check(td, dnp, zb)) {
+       case RESUME_SKIP_ALL:
+               return (0);
+       case RESUME_SKIP_CHILDREN:
+               goto post;
+       case RESUME_SKIP_NONE:
+               break;
+       default:
+               ASSERT(0);
+       }
+
+       if (BP_IS_HOLE(bp)) {
                err = td->td_func(td->td_spa, NULL, NULL, pbuf, zb, dnp,
                    td->td_arg);
                return (err);
@@ -164,8 +226,10 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
                    td->td_arg);
                if (err == TRAVERSE_VISIT_NO_CHILDREN)
                        return (0);
-               if (err)
-                       return (err);
+               if (err == ERESTART)
+                       pause = B_TRUE; /* handle pausing at a common point */
+               if (err != 0)
+                       goto post;
        }
 
        if (BP_GET_LEVEL(bp) > 0) {
@@ -253,9 +317,18 @@ traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
 
+post:
        if (err == 0 && lasterr == 0 && (td->td_flags & TRAVERSE_POST)) {
                err = td->td_func(td->td_spa, NULL, bp, pbuf, zb, dnp,
                    td->td_arg);
+               if (err == ERESTART)
+                       pause = B_TRUE;
+       }
+
+       if (pause && td->td_resume != NULL) {
+               ASSERT3U(err, ==, ERESTART);
+               ASSERT(!hard);
+               traverse_pause(td, zb);
        }
 
        return (err != 0 ? err : lasterr);
@@ -353,27 +426,36 @@ traverse_prefetch_thread(void *arg)
  * in syncing context).
  */
 static int
-traverse_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *rootbp,
-    uint64_t txg_start, int flags, blkptr_cb_t func, void *arg)
+traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
+    uint64_t txg_start, zbookmark_t *resume, int flags,
+    blkptr_cb_t func, void *arg)
 {
-       traverse_data_t td;
-       prefetch_data_t pd = { 0 };
-       zbookmark_t czb;
+       traverse_data_t *td;
+       prefetch_data_t *pd;
+       zbookmark_t *czb;
        int err;
 
-       td.td_spa = spa;
-       td.td_objset = ds ? ds->ds_object : 0;
-       td.td_rootbp = rootbp;
-       td.td_min_txg = txg_start;
-       td.td_func = func;
-       td.td_arg = arg;
-       td.td_pfd = &pd;
-       td.td_flags = flags;
+       ASSERT(ds == NULL || objset == ds->ds_object);
+       ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));
+
+       td = kmem_alloc(sizeof(traverse_data_t), KM_PUSHPAGE);
+       pd = kmem_zalloc(sizeof(prefetch_data_t), KM_PUSHPAGE);
+       czb = kmem_alloc(sizeof(zbookmark_t), KM_PUSHPAGE);
 
-       pd.pd_blks_max = zfs_pd_blks_max;
-       pd.pd_flags = flags;
-       mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
-       cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);
+       td->td_spa = spa;
+       td->td_objset = objset;
+       td->td_rootbp = rootbp;
+       td->td_min_txg = txg_start;
+       td->td_resume = resume;
+       td->td_func = func;
+       td->td_arg = arg;
+       td->td_pfd = pd;
+       td->td_flags = flags;
+
+       pd->pd_blks_max = zfs_pd_blks_max;
+       pd->pd_flags = flags;
+       mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
+       cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);
 
        /* See comment on ZIL traversal in dsl_scan_visitds. */
        if (ds != NULL && !dsl_dataset_is_snapshot(ds)) {
@@ -383,27 +465,31 @@ traverse_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *rootbp,
                if (err)
                        return (err);
 
-               traverse_zil(&td, &os->os_zil_header);
+               traverse_zil(td, &os->os_zil_header);
        }
 
        if (!(flags & TRAVERSE_PREFETCH) ||
            0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
-           &td, TQ_NOQUEUE))
-               pd.pd_exited = B_TRUE;
+           td, TQ_NOQUEUE))
+               pd->pd_exited = B_TRUE;
 
-       SET_BOOKMARK(&czb, td.td_objset,
+       SET_BOOKMARK(czb, td->td_objset,
            ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
-       err = traverse_visitbp(&td, NULL, NULL, rootbp, &czb);
+       err = traverse_visitbp(td, NULL, NULL, rootbp, czb);
 
-       mutex_enter(&pd.pd_mtx);
-       pd.pd_cancel = B_TRUE;
-       cv_broadcast(&pd.pd_cv);
-       while (!pd.pd_exited)
-               cv_wait(&pd.pd_cv, &pd.pd_mtx);
-       mutex_exit(&pd.pd_mtx);
+       mutex_enter(&pd->pd_mtx);
+       pd->pd_cancel = B_TRUE;
+       cv_broadcast(&pd->pd_cv);
+       while (!pd->pd_exited)
+               cv_wait(&pd->pd_cv, &pd->pd_mtx);
+       mutex_exit(&pd->pd_mtx);
 
-       mutex_destroy(&pd.pd_mtx);
-       cv_destroy(&pd.pd_cv);
+       mutex_destroy(&pd->pd_mtx);
+       cv_destroy(&pd->pd_cv);
+
+       kmem_free(czb, sizeof(zbookmark_t));
+       kmem_free(pd, sizeof(struct prefetch_data));
+       kmem_free(td, sizeof(struct traverse_data));
 
        return (err);
 }
@@ -416,8 +502,17 @@ int
 traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
     blkptr_cb_t func, void *arg)
 {
-       return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds,
-           &ds->ds_phys->ds_bp, txg_start, flags, func, arg));
+       return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
+           &ds->ds_phys->ds_bp, txg_start, NULL, flags, func, arg));
+}
+
+int
+traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
+    uint64_t txg_start, zbookmark_t *resume, int flags,
+    blkptr_cb_t func, void *arg)
+{
+       return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
+           blkptr, txg_start, resume, flags, func, arg));
 }
 
 /*
@@ -434,8 +529,8 @@ traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
        boolean_t hard = (flags & TRAVERSE_HARD);
 
        /* visit the MOS */
-       err = traverse_impl(spa, NULL, spa_get_rootblkptr(spa),
-           txg_start, flags, func, arg);
+       err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
+           txg_start, NULL, flags, func, arg);
        if (err)
                return (err);
 
@@ -480,3 +575,11 @@ traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
                err = 0;
        return (err != 0 ? err : lasterr);
 }
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(traverse_dataset);
+EXPORT_SYMBOL(traverse_pool);
+
+module_param(zfs_pd_blks_max, int, 0644);
+MODULE_PARM_DESC(zfs_pd_blks_max, "Max number of blocks to prefetch");
+#endif
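
The pause/resume protocol added above can be exercised from the callback side: a blkptr_cb_t that returns ERESTART causes traverse_visitbp() to record its position in the caller-supplied bookmark (traverse_pause()), and a later traversal started with that same bookmark skips everything already visited (resume_skip_check()) before continuing. The sketch below is an illustration only, not code from this commit: scan_cb, scan_state_t, and scan_some are hypothetical names, the blkptr_cb_t signature is the one this file still uses (including the pbuf argument), and TRAVERSE_POST without TRAVERSE_HARD is assumed, since the pause path asserts !hard and the skip logic only revisits a block without its children on a post-order resume.

/*
 * Illustration only; names below are hypothetical and not part of this commit.
 */
typedef struct scan_state {
	zbookmark_t	ss_resume;	/* persists across pauses; zeroed once resumed */
	uint64_t	ss_visited;
	uint64_t	ss_budget;	/* blocks to visit before pausing */
} scan_state_t;

static int
scan_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	scan_state_t *ss = arg;

	if (bp == NULL)		/* hole */
		return (0);

	/* ... per-block work would go here ... */

	/*
	 * Returning ERESTART asks traverse_visitbp() to stop; it copies
	 * *zb into the caller's bookmark via traverse_pause().
	 */
	if (++ss->ss_visited >= ss->ss_budget)
		return (ERESTART);
	return (0);
}

/*
 * Caller side: the first call starts with a zeroed bookmark; a later call
 * with the same (now non-zero) bookmark skips blocks already visited until
 * the saved position is reached, then continues from there.
 */
static int
scan_some(spa_t *spa, blkptr_t *bp, uint64_t txg_start, scan_state_t *ss)
{
	int err;

	err = traverse_dataset_destroyed(spa, bp, txg_start,
	    &ss->ss_resume, TRAVERSE_POST, scan_cb, ss);
	if (err == ERESTART)
		err = 0;	/* paused; call again later to resume */
	return (err);
}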