/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */
#include <sys/dsl_dataset.h>
#include <sys/dmu.h>
#include <sys/refcount.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 *     and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that locking on the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */
54 dsl_deadlist_compare(const void *arg1, const void *arg2)
56 const dsl_deadlist_entry_t *dle1 = arg1;
57 const dsl_deadlist_entry_t *dle2 = arg2;
59 if (dle1->dle_mintxg < dle2->dle_mintxg)
61 else if (dle1->dle_mintxg > dle2->dle_mintxg)
68 dsl_deadlist_load_tree(dsl_deadlist_t *dl)
73 ASSERT(!dl->dl_oldfmt);
77 avl_create(&dl->dl_tree, dsl_deadlist_compare,
78 sizeof (dsl_deadlist_entry_t),
79 offsetof(dsl_deadlist_entry_t, dle_node));
80 for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
81 zap_cursor_retrieve(&zc, &za) == 0;
82 zap_cursor_advance(&zc)) {
83 dsl_deadlist_entry_t *dle;
85 dle = kmem_alloc(sizeof (*dle), KM_PUSHPAGE);
86 dle->dle_mintxg = strtonum(za.za_name, NULL);
87 VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
88 za.za_first_integer));
89 avl_add(&dl->dl_tree, dle);
92 dl->dl_havetree = B_TRUE;
96 dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
98 dmu_object_info_t doi;
100 mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
102 dl->dl_object = object;
103 VERIFY3U(0, ==, dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
104 dmu_object_info_from_db(dl->dl_dbuf, &doi);
105 if (doi.doi_type == DMU_OT_BPOBJ) {
106 dmu_buf_rele(dl->dl_dbuf, dl);
108 dl->dl_oldfmt = B_TRUE;
109 VERIFY3U(0, ==, bpobj_open(&dl->dl_bpobj, os, object));
113 dl->dl_oldfmt = B_FALSE;
114 dl->dl_phys = dl->dl_dbuf->db_data;
115 dl->dl_havetree = B_FALSE;
119 dsl_deadlist_close(dsl_deadlist_t *dl)
122 dsl_deadlist_entry_t *dle;
125 dl->dl_oldfmt = B_FALSE;
126 bpobj_close(&dl->dl_bpobj);
130 if (dl->dl_havetree) {
131 while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
133 bpobj_close(&dle->dle_bpobj);
134 kmem_free(dle, sizeof (*dle));
136 avl_destroy(&dl->dl_tree);
138 dmu_buf_rele(dl->dl_dbuf, dl);
139 mutex_destroy(&dl->dl_lock);
145 dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
147 if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
148 return (bpobj_alloc(os, SPA_MAXBLOCKSIZE, tx));
149 return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
150 sizeof (dsl_deadlist_phys_t), tx));
154 dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
156 dmu_object_info_t doi;
160 VERIFY3U(0, ==, dmu_object_info(os, dlobj, &doi));
161 if (doi.doi_type == DMU_OT_BPOBJ) {
162 bpobj_free(os, dlobj, tx);
166 for (zap_cursor_init(&zc, os, dlobj);
167 zap_cursor_retrieve(&zc, &za) == 0;
168 zap_cursor_advance(&zc))
169 bpobj_free(os, za.za_first_integer, tx);
170 zap_cursor_fini(&zc);
171 VERIFY3U(0, ==, dmu_object_free(os, dlobj, tx));
175 dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx)
177 dsl_deadlist_entry_t dle_tofind;
178 dsl_deadlist_entry_t *dle;
182 bpobj_enqueue(&dl->dl_bpobj, bp, tx);
186 dsl_deadlist_load_tree(dl);
188 dmu_buf_will_dirty(dl->dl_dbuf, tx);
189 mutex_enter(&dl->dl_lock);
190 dl->dl_phys->dl_used +=
191 bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
192 dl->dl_phys->dl_comp += BP_GET_PSIZE(bp);
193 dl->dl_phys->dl_uncomp += BP_GET_UCSIZE(bp);
194 mutex_exit(&dl->dl_lock);
196 dle_tofind.dle_mintxg = bp->blk_birth;
197 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
199 dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
201 dle = AVL_PREV(&dl->dl_tree, dle);
202 bpobj_enqueue(&dle->dle_bpobj, bp, tx);
206 * Insert new key in deadlist, which must be > all current entries.
207 * mintxg is not inclusive.
210 dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
213 dsl_deadlist_entry_t *dle;
218 dsl_deadlist_load_tree(dl);
220 dle = kmem_alloc(sizeof (*dle), KM_PUSHPAGE);
221 dle->dle_mintxg = mintxg;
222 obj = bpobj_alloc(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
223 VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
224 avl_add(&dl->dl_tree, dle);
226 VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, dl->dl_object,
231 * Remove this key, merging its entries into the previous key.
234 dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
236 dsl_deadlist_entry_t dle_tofind;
237 dsl_deadlist_entry_t *dle, *dle_prev;
242 dsl_deadlist_load_tree(dl);
244 dle_tofind.dle_mintxg = mintxg;
245 dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
246 dle_prev = AVL_PREV(&dl->dl_tree, dle);
248 bpobj_enqueue_subobj(&dle_prev->dle_bpobj,
249 dle->dle_bpobj.bpo_object, tx);
251 avl_remove(&dl->dl_tree, dle);
252 bpobj_close(&dle->dle_bpobj);
253 kmem_free(dle, sizeof (*dle));
255 VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
259 * Walk ds's snapshots to regenerate generate ZAP & AVL.
262 dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
263 uint64_t mrs_obj, dmu_tx_t *tx)
266 dsl_pool_t *dp = dmu_objset_pool(os);
268 dsl_deadlist_open(&dl, os, dlobj);
270 dsl_deadlist_close(&dl);
274 while (mrs_obj != 0) {
276 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
277 dsl_deadlist_add_key(&dl, ds->ds_phys->ds_prev_snap_txg, tx);
278 mrs_obj = ds->ds_phys->ds_prev_snap_obj;
279 dsl_dataset_rele(ds, FTAG);
281 dsl_deadlist_close(&dl);
285 dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
286 uint64_t mrs_obj, dmu_tx_t *tx)
288 dsl_deadlist_entry_t *dle;
291 newobj = dsl_deadlist_alloc(dl->dl_os, tx);
294 dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
298 dsl_deadlist_load_tree(dl);
300 for (dle = avl_first(&dl->dl_tree); dle;
301 dle = AVL_NEXT(&dl->dl_tree, dle)) {
304 if (dle->dle_mintxg >= maxtxg)
307 obj = bpobj_alloc(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
308 VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, newobj,
309 dle->dle_mintxg, obj, tx));
315 dsl_deadlist_space(dsl_deadlist_t *dl,
316 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
319 VERIFY3U(0, ==, bpobj_space(&dl->dl_bpobj,
320 usedp, compp, uncompp));
324 mutex_enter(&dl->dl_lock);
325 *usedp = dl->dl_phys->dl_used;
326 *compp = dl->dl_phys->dl_comp;
327 *uncompp = dl->dl_phys->dl_uncomp;
328 mutex_exit(&dl->dl_lock);
332 * return space used in the range (mintxg, maxtxg].
333 * Includes maxtxg, does not include mintxg.
334 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
335 * larger than any bp in the deadlist (eg. UINT64_MAX)).
338 dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
339 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
341 dsl_deadlist_entry_t *dle;
342 dsl_deadlist_entry_t dle_tofind;
346 VERIFY3U(0, ==, bpobj_space_range(&dl->dl_bpobj,
347 mintxg, maxtxg, usedp, compp, uncompp));
351 *usedp = *compp = *uncompp = 0;
353 mutex_enter(&dl->dl_lock);
354 dsl_deadlist_load_tree(dl);
355 dle_tofind.dle_mintxg = mintxg;
356 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
358 * If we don't find this mintxg, there shouldn't be anything
361 ASSERT(dle != NULL ||
362 avl_nearest(&dl->dl_tree, where, AVL_AFTER) == NULL);
364 for (; dle && dle->dle_mintxg < maxtxg;
365 dle = AVL_NEXT(&dl->dl_tree, dle)) {
366 uint64_t used, comp, uncomp;
368 VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
369 &used, &comp, &uncomp));
375 mutex_exit(&dl->dl_lock);
379 dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
382 dsl_deadlist_entry_t dle_tofind;
383 dsl_deadlist_entry_t *dle;
385 uint64_t used, comp, uncomp;
388 VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
389 VERIFY3U(0, ==, bpobj_space(&bpo, &used, &comp, &uncomp));
392 dsl_deadlist_load_tree(dl);
394 dmu_buf_will_dirty(dl->dl_dbuf, tx);
395 mutex_enter(&dl->dl_lock);
396 dl->dl_phys->dl_used += used;
397 dl->dl_phys->dl_comp += comp;
398 dl->dl_phys->dl_uncomp += uncomp;
399 mutex_exit(&dl->dl_lock);
401 dle_tofind.dle_mintxg = birth;
402 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
404 dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
405 bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
409 dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
411 dsl_deadlist_t *dl = arg;
412 dsl_deadlist_insert(dl, bp, tx);
417 * Merge the deadlist pointed to by 'obj' into dl. obj will be left as
421 dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
426 dsl_deadlist_phys_t *dlp;
427 dmu_object_info_t doi;
429 VERIFY3U(0, ==, dmu_object_info(dl->dl_os, obj, &doi));
430 if (doi.doi_type == DMU_OT_BPOBJ) {
432 VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
433 VERIFY3U(0, ==, bpobj_iterate(&bpo,
434 dsl_deadlist_insert_cb, dl, tx));
439 for (zap_cursor_init(&zc, dl->dl_os, obj);
440 zap_cursor_retrieve(&zc, &za) == 0;
441 zap_cursor_advance(&zc)) {
442 uint64_t mintxg = strtonum(za.za_name, NULL);
443 dsl_deadlist_insert_bpobj(dl, za.za_first_integer, mintxg, tx);
444 VERIFY3U(0, ==, zap_remove_int(dl->dl_os, obj, mintxg, tx));
446 zap_cursor_fini(&zc);
448 VERIFY3U(0, ==, dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
449 dlp = bonus->db_data;
450 dmu_buf_will_dirty(bonus, tx);
451 bzero(dlp, sizeof (*dlp));
452 dmu_buf_rele(bonus, FTAG);
456 * Remove entries on dl that are >= mintxg, and put them on the bpobj.
459 dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
462 dsl_deadlist_entry_t dle_tofind;
463 dsl_deadlist_entry_t *dle;
466 ASSERT(!dl->dl_oldfmt);
467 dmu_buf_will_dirty(dl->dl_dbuf, tx);
468 dsl_deadlist_load_tree(dl);
470 dle_tofind.dle_mintxg = mintxg;
471 dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
473 dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
475 uint64_t used, comp, uncomp;
476 dsl_deadlist_entry_t *dle_next;
478 bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
480 VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
481 &used, &comp, &uncomp));
482 mutex_enter(&dl->dl_lock);
483 ASSERT3U(dl->dl_phys->dl_used, >=, used);
484 ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
485 ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
486 dl->dl_phys->dl_used -= used;
487 dl->dl_phys->dl_comp -= comp;
488 dl->dl_phys->dl_uncomp -= uncomp;
489 mutex_exit(&dl->dl_lock);
491 VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object,
492 dle->dle_mintxg, tx));
494 dle_next = AVL_NEXT(&dl->dl_tree, dle);
495 avl_remove(&dl->dl_tree, dle);
496 bpobj_close(&dle->dle_bpobj);
497 kmem_free(dle, sizeof (*dle));