[zfs.git] / module / zfs / dsl_scrub.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1; /* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3; /* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */

extern int zfs_txg_timeout;

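/*
 * Table of per-function scrub callbacks, indexed by enum scrub_func;
 * the SCRUB_FUNC_NONE slot is intentionally NULL.
 */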
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
        NULL,
        dsl_pool_scrub_clean_cb
};

#define SET_BOOKMARK(zb, objset, object, level, blkid)  \
{                                                       \
        (zb)->zb_objset = objset;                       \
        (zb)->zb_object = object;                       \
        (zb)->zb_level = level;                         \
        (zb)->zb_blkid = blkid;                         \
}

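/*
 * Sync-task callback that (re)initializes scrub state: cancel any scrub
 * already in progress, compute the txg range to examine (narrowed by the
 * DTLs when this is a resilver), create the ZAP object that serves as the
 * queue of datasets to visit, and persist all of this state in the pool
 * directory so an interrupted scrub can resume later.
 */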
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        enum scrub_func *funcp = arg2;
        dmu_object_type_t ot = 0;
        boolean_t complete = B_FALSE;

        dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

        ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
        ASSERT(*funcp > SCRUB_FUNC_NONE);
        ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

        dp->dp_scrub_min_txg = 0;
        dp->dp_scrub_max_txg = tx->tx_txg;

        if (*funcp == SCRUB_FUNC_CLEAN) {
                vdev_t *rvd = dp->dp_spa->spa_root_vdev;

                /* rewrite all disk labels */
                vdev_config_dirty(rvd);

                if (vdev_resilver_needed(rvd,
                    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_RESILVER_START);
                        dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
                            tx->tx_txg);
                } else {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_SCRUB_START);
                }

                /* zero out the scrub stats in all vdev_stat_t's */
                vdev_scrub_stat_update(rvd,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);

                dp->dp_spa->spa_scrub_started = B_TRUE;
        }

        /* back to the generic stuff */

        if (dp->dp_blkstats == NULL) {
                dp->dp_blkstats =
                    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
        }
        bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
                ot = DMU_OT_ZAP_OTHER;

        dp->dp_scrub_func = *funcp;
        dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
            ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
        dp->dp_scrub_restart = B_FALSE;
        dp->dp_spa->spa_scrub_errors = 0;

        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
            &dp->dp_scrub_func, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
            &dp->dp_scrub_queue_obj, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_min_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_max_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
            "func=%u mintxg=%llu maxtxg=%llu",
            *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_setup_sync, dp, &func, 0));
}

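/*
 * Sync-task callback that tears the scrub state back down.  *completep
 * says whether the scrub ran to completion or was cancelled; that in
 * turn decides how the DTLs are reassessed and which event is posted.
 */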
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        boolean_t *completep = arg2;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        mutex_enter(&dp->dp_scrub_cancel_lock);

        if (dp->dp_scrub_restart) {
                dp->dp_scrub_restart = B_FALSE;
                *completep = B_FALSE;
        }

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
        dp->dp_spa->spa_scrub_started = B_FALSE;
        dp->dp_spa->spa_scrub_active = B_FALSE;

        dp->dp_scrub_func = SCRUB_FUNC_NONE;
        VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
            dp->dp_scrub_queue_obj, tx));
        dp->dp_scrub_queue_obj = 0;
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, tx));

        spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
            "complete=%u", *completep);

        /* below is scrub-clean specific */
        vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
            *completep);
        /*
         * If the scrub/resilver completed, update all DTLs to reflect this.
         * Whether it succeeded or not, vacate all temporary scrub DTLs.
         */
        vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
            *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
        if (*completep)
                spa_event_notify(dp->dp_spa, NULL, dp->dp_scrub_min_txg ?
                    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
        spa_errlog_rotate(dp->dp_spa);

        /*
         * We may have finished replacing a device.
         * Let the async thread assess this and handle the detach.
         */
        spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

        dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
        boolean_t complete = B_FALSE;

        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
        /*
         * This function will be used by the bp-rewrite wad to intercept
         * frees.
         */
        return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
            done, private, arc_flags));
}

static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
        return (zb->zb_objset == 0 && zb->zb_object == 0 &&
            zb->zb_level == 0 && zb->zb_blkid == 0);
}

/*
 * Decide whether the block identified by zb1 (and everything below it)
 * was already visited before the saved bookmark zb2.  dnp is the dnode
 * for zb1->zb_object; it supplies the block geometry needed to convert
 * zb1 into a range of level-0 block ids.  zb2 must be a level-0
 * bookmark.
 */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
        uint64_t zb1nextL0, zb2thisobj;

        ASSERT(zb1->zb_objset == zb2->zb_objset);
        ASSERT(zb1->zb_object != -1ULL);
        ASSERT(zb2->zb_level == 0);

        /*
         * A bookmark in the deadlist is considered to be after
         * everything else.
         */
        if (zb2->zb_object == -1ULL)
                return (B_TRUE);

        /* The objset_phys_t isn't before anything. */
        if (dnp == NULL)
                return (B_FALSE);

        zb1nextL0 = (zb1->zb_blkid + 1) <<
            ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

        zb2thisobj = zb2->zb_object ? zb2->zb_object :
            zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

        if (zb1->zb_object == 0) {
                uint64_t nextobj = zb1nextL0 *
                    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
                return (nextobj <= zb2thisobj);
        }

        if (zb1->zb_object < zb2thisobj)
                return (B_TRUE);
        if (zb1->zb_object > zb2thisobj)
                return (B_FALSE);
        if (zb2->zb_object == 0)
                return (B_FALSE);
        return (zb1nextL0 <= zb2->zb_blkid);
}

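/*
 * Decide whether to pause the traversal at this point.  We pause once
 * we have run past the txg deadline, or past the per-scrub minimum time
 * while the txg sync thread is waiting on us; the current position is
 * saved in dp_scrub_bookmark so the next txg can resume from it.  We
 * never pause while still resuming, and we can only record level-0
 * bookmarks.
 */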
static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
        int elapsed_ticks;
        int mintime;

        if (dp->dp_scrub_pausing)
                return (B_TRUE); /* we're already pausing */

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
                return (B_FALSE); /* we're resuming */

        /* We only know how to resume from level-0 blocks. */
        if (zb->zb_level != 0)
                return (B_FALSE);

        mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
            zfs_scrub_min_time;
        elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
        if (elapsed_ticks > hz * zfs_txg_timeout ||
            (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
                dprintf("pausing at %llx/%llx/%llx/%llx\n",
                    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
                    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
                dp->dp_scrub_pausing = B_TRUE;
                dp->dp_scrub_bookmark = *zb;
                return (B_TRUE);
        }
        return (B_FALSE);
}

typedef struct zil_traverse_arg {
        dsl_pool_t      *zta_dp;
        zil_header_t    *zta_zh;
} zil_traverse_arg_t;

/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        zil_traverse_arg_t *zta = arg;
        dsl_pool_t *dp = zta->zta_dp;
        zil_header_t *zh = zta->zta_zh;
        zbookmark_t zb;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
                return;

        zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
        VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                zil_traverse_arg_t *zta = arg;
                dsl_pool_t *dp = zta->zta_dp;
                zil_header_t *zh = zta->zta_zh;
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_t zb;

                if (bp->blk_birth <= dp->dp_scrub_min_txg)
                        return;

                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return;

                zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
                zb.zb_object = lr->lr_foid;
                zb.zb_level = BP_GET_LEVEL(bp);
                zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
                VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
        }
}

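/*
 * Scrub the intent log for an objset.  We only visit log blocks that
 * have been claimed but not yet replayed (or, on a read-only pool,
 * blocks that would be claimed), since anything past the claim point
 * is not yet stable on disk.
 */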
static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zil_traverse_arg_t zta = { dp, zh };
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed (or, in read-only mode, blocks that *would* be claimed).
         */
        if (claim_txg == 0 && spa_writeable(dp->dp_spa))
                return;

        zilog = zil_alloc(dp->dp_meta_objset, zh);

        (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
            claim_txg);

        zil_free(zilog);
}

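/*
 * The heart of the traversal: recursively visit bp and everything below
 * it (indirect blocks, the dnodes inside DMU_OT_DNODE blocks, and the
 * meta-dnode and intent log of DMU_OT_OBJSET blocks), honoring the
 * resume bookmark and the pause checks along the way, and finally hand
 * the block to the active scrub callback.
 */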
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
        int err;
        arc_buf_t *buf = NULL;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (scrub_pause(dp, zb))
                return;

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
                /*
                 * If we already visited this bp & everything below (in
                 * a prior txg), don't bother doing it again.
                 */
                if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
                        return;

                /*
                 * If we found the block we're trying to resume from, or
                 * we went past it to a different object, zero it out to
                 * indicate that it's OK to start checking for pausing
                 * again.
                 */
                if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
                    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
                        dprintf("resuming at %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
                }
        }

        if (BP_GET_LEVEL(bp) > 0) {
                uint32_t flags = ARC_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                cbp = buf->b_data;

                for (i = 0; i < epb; i++, cbp++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        scrub_visitbp(dp, dnp, buf, cbp, &czb);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
                uint32_t flags = ARC_WAIT;
                dnode_phys_t *child_dnp;
                int i, j;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                child_dnp = buf->b_data;

                for (i = 0; i < epb; i++, child_dnp++) {
                        for (j = 0; j < child_dnp->dn_nblkptr; j++) {
                                zbookmark_t czb;

                                SET_BOOKMARK(&czb, zb->zb_objset,
                                    zb->zb_blkid * epb + i,
                                    child_dnp->dn_nlevels - 1, j);
                                scrub_visitbp(dp, child_dnp, buf,
                                    &child_dnp->dn_blkptr[j], &czb);
                        }
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
                uint32_t flags = ARC_WAIT;
                objset_phys_t *osp;
                int j;

                err = arc_read_nolock(NULL, dp->dp_spa, bp,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }

                osp = buf->b_data;

                traverse_zil(dp, &osp->os_zil_header);

                for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, 0,
                            osp->os_meta_dnode.dn_nlevels - 1, j);
                        scrub_visitbp(dp, &osp->os_meta_dnode, buf,
                            &osp->os_meta_dnode.dn_blkptr[j], &czb);
                }
        }

        (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
        zbookmark_t zb;

        SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
        scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

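/*
 * The three hooks below keep the scrub's persistent state (the resume
 * bookmark and the queue of datasets to visit) consistent when datasets
 * are destroyed, snapshotted, or clone-swapped while a scrub is active.
 */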
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) != 0) {
                return;
        }

        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                dp->dp_scrub_bookmark.zb_objset =
                    ds->ds_phys->ds_prev_snap_obj;
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
        }
}

void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds1->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
        } else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
        }

        if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds1->ds_object, tx) == 0) {
                int err = zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
                VERIFY(err == 0 || err == EEXIST);
                if (err == EEXIST) {
                        /* Both were there to begin with */
                        VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                            dp->dp_scrub_queue_obj, ds1->ds_object, tx));
                }
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds2->ds_object, tx) == 0) {
                VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
        }
}

struct enqueue_clones_arg {
        dmu_tx_t *tx;
        uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        struct enqueue_clones_arg *eca = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);
        dp = ds->ds_dir->dd_pool;

        if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
                while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
                        dsl_dataset_t *prev;
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

                        dsl_dataset_rele(ds, FTAG);
                        if (err)
                                return (err);
                        ds = prev;
                }
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_object, eca->tx) == 0);
        }
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

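/*
 * Visit one dataset from the work queue: scrub only the blocks born
 * after its previous snapshot, then enqueue its next snapshot and, if
 * this snapshot has clones, the first dataset of each clone branch, so
 * the traversal keeps walking forward through snapshot history.
 */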
static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
        dsl_dataset_t *ds;
        uint64_t min_txg_save;

        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

        /*
         * Iterate over the bps in this ds.
         */
        min_txg_save = dp->dp_scrub_min_txg;
        dp->dp_scrub_min_txg =
            MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
        scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
        dp->dp_scrub_min_txg = min_txg_save;

        if (dp->dp_scrub_pausing)
                goto out;

        /*
         * Add descendant datasets to work queue.
         */
        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        if (ds->ds_phys->ds_num_children > 1) {
                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        struct enqueue_clones_arg eca;
                        eca.tx = tx;
                        eca.originobj = ds->ds_object;

                        (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
                            NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
                } else {
                        VERIFY(zap_join(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj,
                            dp->dp_scrub_queue_obj, tx) == 0);
                }
        }

out:
        dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        dmu_tx_t *tx = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);

        dp = ds->ds_dir->dd_pool;

        while (ds->ds_phys->ds_prev_snap_obj != 0) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
                    FTAG, &prev);
                if (err) {
                        dsl_dataset_rele(ds, FTAG);
                        return (err);
                }

                /*
                 * If this is a clone, we don't need to worry about it for now.
                 */
                if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_dataset_rele(prev, FTAG);
                        return (0);
                }
                dsl_dataset_rele(ds, FTAG);
                ds = prev;
        }

        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

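/*
 * Called once per txg from syncing context while a scrub is in
 * progress: do up to one txg's worth of work.  Restart if requested,
 * seed the queue on the first pass (the MOS and the origin snapshot),
 * resume from the saved bookmark if we were paused, then keep pulling
 * datasets off the queue until it drains (scrub complete) or we decide
 * to pause, in which case the bookmark and error count are synced out
 * for the next txg.
 */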
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
        spa_t *spa = dp->dp_spa;
        zap_cursor_t zc;
        zap_attribute_t za;
        boolean_t complete = B_TRUE;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        /*
         * If the pool is not loaded, or is trying to unload, leave it alone.
         */
        if (spa->spa_load_state != SPA_LOAD_NONE || spa_shutting_down(spa))
                return;

        if (dp->dp_scrub_restart) {
                enum scrub_func func = dp->dp_scrub_func;
                dp->dp_scrub_restart = B_FALSE;
                dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
        }

        if (spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
                /*
                 * We must have resumed after rebooting; reset the vdev
                 * stats to know that we're doing a scrub (although it
                 * will think we're just starting now).
                 */
                vdev_scrub_stat_update(spa->spa_root_vdev,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);
        }

        dp->dp_scrub_pausing = B_FALSE;
        dp->dp_scrub_start_time = lbolt64;
        dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
        spa->spa_scrub_active = B_TRUE;

        if (dp->dp_scrub_bookmark.zb_objset == 0) {
                /* First do the MOS & ORIGIN */
                scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
                if (dp->dp_scrub_pausing)
                        goto out;

                if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
                        VERIFY(0 == dmu_objset_find_spa(spa,
                            NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
                } else {
                        scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
                }
                ASSERT(!dp->dp_scrub_pausing);
        } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
                /*
                 * If we were paused, continue from here.  Note if the
                 * ds we were paused on was deleted, the zb_objset will
                 * be -1, so we will skip this and find a new objset
                 * below.
                 */
                scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
                if (dp->dp_scrub_pausing)
                        goto out;
        }

        /*
         * In case we were paused right at the end of the ds, zero the
         * bookmark so we don't think that we're still trying to resume.
         */
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        /* keep pulling things out of the zap-object-as-queue */
        while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
            zap_cursor_retrieve(&zc, &za) == 0) {
                VERIFY(0 == zap_remove(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, za.za_name, tx));
                scrub_visitds(dp, za.za_first_integer, tx);
                if (dp->dp_scrub_pausing)
                        break;
                zap_cursor_fini(&zc);
        }
        zap_cursor_fini(&zc);
        if (dp->dp_scrub_pausing)
                goto out;

        /* done. */

        dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
        return;
out:
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &spa->spa_scrub_errors, tx));

        /* XXX this is scrub-clean specific */
        mutex_enter(&spa->spa_scrub_lock);
        while (spa->spa_scrub_inflight > 0)
                cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
        mutex_exit(&spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
        mutex_enter(&dp->dp_scrub_cancel_lock);
        dp->dp_scrub_restart = B_TRUE;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

/*
 * scrub consumers
 */

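/*
 * Accumulate statistics for this block in four buckets of zab_type:
 * [level][DMU_OT_TOTAL], [level][type], [DN_MAX_LEVELS][DMU_OT_TOTAL],
 * and [DN_MAX_LEVELS][type], and note ditto copies whose DVAs landed
 * on the same top-level vdev.
 */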
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
        int i;

        /*
         * If we resume after a reboot, zab will be NULL; don't record
         * incomplete stats in that case.
         */
        if (zab == NULL)
                return;

        for (i = 0; i < 4; i++) {
                int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
                int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
                zfs_blkstat_t *zb = &zab->zab_type[l][t];
                int equal;

                zb->zb_count++;
                zb->zb_asize += BP_GET_ASIZE(bp);
                zb->zb_lsize += BP_GET_LSIZE(bp);
                zb->zb_psize += BP_GET_PSIZE(bp);
                zb->zb_gangs += BP_COUNT_GANG(bp);

                switch (BP_GET_NDVAS(bp)) {
                case 2:
                        if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1]))
                                zb->zb_ditto_2_of_2_samevdev++;
                        break;
                case 3:
                        equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1])) +
                            (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2])) +
                            (DVA_GET_VDEV(&bp->blk_dva[1]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2]));
                        if (equal == 1)
                                zb->zb_ditto_2_of_3_samevdev++;
                        else if (equal == 3)
                                zb->zb_ditto_3_of_3_samevdev++;
                        break;
                }
        }
}

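/*
 * Completion callback for the reads issued by dsl_pool_scrub_clean_cb:
 * free the data buffer, drop the in-flight count (waking any waiters
 * throttled on it), and count every error except a checksum error on a
 * speculative (intent log) read, where failure is expected.
 */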
static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        zio_data_buf_free(zio->io_data, zio->io_size);

        mutex_enter(&spa->spa_scrub_lock);
        spa->spa_scrub_inflight--;
        cv_broadcast(&spa->spa_scrub_io_cv);

        if (zio->io_error && (zio->io_error != ECKSUM ||
            !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
                spa->spa_scrub_errors++;
        mutex_exit(&spa->spa_scrub_lock);
}

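/*
 * The SCRUB_FUNC_CLEAN callback: account for the block, then decide
 * whether it needs I/O.  A scrub reads every block; a resilver reads a
 * block only if one of its DVAs falls in a top-level vdev's DTL (a gang
 * block is always read, since its members may span vdevs).  Reads are
 * issued asynchronously, throttled to spa_scrub_maxinflight.
 */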
static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
        size_t size = BP_GET_PSIZE(bp);
        spa_t *spa = dp->dp_spa;
        boolean_t needs_io;
        int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
        int zio_priority;

        ASSERT(bp->blk_birth > dp->dp_scrub_min_txg);

        if (bp->blk_birth >= dp->dp_scrub_max_txg)
                return (0);

        count_block(dp->dp_blkstats, bp);

        if (dp->dp_scrub_isresilver == 0) {
                /* It's a scrub */
                zio_flags |= ZIO_FLAG_SCRUB;
                zio_priority = ZIO_PRIORITY_SCRUB;
                needs_io = B_TRUE;
        } else {
                /* It's a resilver */
                zio_flags |= ZIO_FLAG_RESILVER;
                zio_priority = ZIO_PRIORITY_RESILVER;
                needs_io = B_FALSE;
        }

        /* If it's an intent log block, failure is expected. */
        if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
                zio_flags |= ZIO_FLAG_SPECULATIVE;

        for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
                vdev_t *vd = vdev_lookup_top(spa,
                    DVA_GET_VDEV(&bp->blk_dva[d]));

                /*
                 * Keep track of how much data we've examined so that
                 * zpool(1M) status can make useful progress reports.
                 */
                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_scrub_examined +=
                    DVA_GET_ASIZE(&bp->blk_dva[d]);
                mutex_exit(&vd->vdev_stat_lock);

                /* if it's a resilver, this may not be in the target range */
                if (!needs_io) {
                        if (DVA_GET_GANG(&bp->blk_dva[d])) {
                                /*
                                 * Gang members may be spread across multiple
                                 * vdevs, so the best estimate we have is the
                                 * scrub range, which has already been checked.
                                 * XXX -- it would be better to change our
                                 * allocation policy to ensure that all
                                 * gang members reside on the same vdev.
                                 */
                                needs_io = B_TRUE;
                        } else {
                                needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
                                    bp->blk_birth, 1);
                        }
                }
        }

        if (needs_io && !zfs_no_scrub_io) {
                void *data = zio_data_buf_alloc(size);

                mutex_enter(&spa->spa_scrub_lock);
                while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
                        cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
                spa->spa_scrub_inflight++;
                mutex_exit(&spa->spa_scrub_lock);

                zio_nowait(zio_read(NULL, spa, bp, data, size,
                    dsl_pool_scrub_clean_done, NULL, zio_priority,
                    zio_flags, zb));
        }

        /* do not relocate this block */
        return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
        /*
         * Purge all vdev caches.  We do this here rather than in sync
         * context because this requires a writer lock on the spa_config
         * lock, which we can't do from sync context.  The
         * spa_scrub_reopen flag indicates that vdev_open() should not
         * attempt to start another scrub.
         */
        spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
        dp->dp_spa->spa_scrub_reopen = B_TRUE;
        vdev_reopen(dp->dp_spa->spa_root_vdev);
        dp->dp_spa->spa_scrub_reopen = B_FALSE;
        spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

        return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}