/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
/*
 * Enable/disable prefetching of dedup-ed blocks which are going to be freed.
 */
int zfs_dedup_prefetch = 1;

static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
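 *
 * Illustrative example (numbers assumed, not from the source): with
 * ds_reserved = 10M and ds_unique_bytes = 8M, a delta of +4M returns
 * MAX(12M, 10M) - MAX(8M, 10M) = 2M, since the first 2M of growth was
 * already accounted for by the refreservation.
 */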
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
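
/*
 * Charge this dataset for a newly-born block: update its used,
 * compressed, uncompressed, and unique byte counts, and propagate the
 * parent_delta()-adjusted charge to the dsl_dir layer.
 */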
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
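
/*
 * Uncharge this dataset for a freed block.  A block born after the
 * most recent snapshot is freed immediately; an older block is still
 * referenced by a snapshot, so it goes on the deadlist instead.
 * Returns the amount of on-disk space freed from this dataset.
 */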
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}
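
/*
 * A block can only be freed (rather than deadlisted) if it was born
 * after the most recent snapshot; see dsl_dataset_block_kill() above.
 */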
boolean_t
dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
    uint64_t blk_birth)
{
	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
		return (B_FALSE);

	if (zfs_dedup_prefetch && bp && BP_GET_DEDUP(bp))
		ddt_prefetch(dsl_dataset_get_spa(ds), bp);

	return (B_TRUE);
}
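
/*
 * dmu_buf user-eviction callback: tear down the in-core dsl_dataset_t
 * once the last hold on its bonus buffer is released.
 */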
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
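
/*
 * If this dataset is a snapshot, look up its name in the head
 * dataset's snapnames ZAP and cache it in ds_snapname.
 */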
static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
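
/*
 * Look up a snapshot name in this dataset's snapnames ZAP, using
 * case-insensitive matching for DS_FLAG_CI_DATASET datasets.
 */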
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);
	return (err);
}
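
/*
 * Find or construct the in-core dsl_dataset_t for dsobj and take a
 * reference on it.  If two threads race to instantiate the same
 * dataset, dmu_buf_set_user_ie() picks a winner and the loser tears
 * its copy back down.
 */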
static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);
	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&ds->ds_rwlock, 0, 0, 0);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		bplist_create(&ds->ds_pending_deadlist);
		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);

		err = dsl_dir_open_obj(dp,
		    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		if (err) {
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    ds->ds_phys->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0)
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);

		if (err || winner) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock.  And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the dp_config_rwlock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
int
dsl_dataset_own(const char *name, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}
static int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}
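
/*
 * Try to become the owner of this dataset.  Fails if it is already
 * owned, or if it is inconsistent and inconsistentok is not set.
 */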
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = tag;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}
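
/*
 * Create the on-disk dsl_dataset object for a new dataset under dd.
 * If origin is non-NULL, the new dataset is a clone: it starts out
 * sharing the origin's block tree and deadlist, and the origin's
 * bookkeeping (num_children, next_clones, dd_clones) is updated.
 */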
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	return (dsobj);
}
struct destroyarg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *failed;
	boolean_t defer;
};

static int
dsl_snapshot_destroy_one(const char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	char *dsname;
	int err;

	dsname = kmem_asprintf("%s@%s", name, da->snapname);
	err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
	strfree(dsname);
	if (err == 0) {
		struct dsl_ds_destroyarg *dsda;

		dsl_dataset_make_exclusive(ds, da->dstg);
		dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
		dsda->ds = ds;
		dsda->defer = da->defer;
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, dsda, da->dstg, 0);
	} else if (err == ENOENT) {
		err = 0;
	} else {
		(void) strcpy(da->failed, name);
	}
	return (err);
}
/*
 * Destroy 'snapname' in all descendants of 'fsname'.
 */
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
int
dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;
	da.defer = defer;

	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error.
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, da.dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}
static boolean_t
dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
{
	boolean_t might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(ds))
		might_destroy = B_TRUE;
	mutex_exit(&ds->ds_lock);

	return (might_destroy);
}
/*
 * If we're removing a clone, and these three conditions are true:
 *	1) the clone's origin has no other children
 *	2) the clone's origin has no user references
 *	3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);

		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}

	return (0);
}
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t dummy_ds = { 0 };

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds.ds_dir = dd;
	dummy_ds.ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}

	/*
	 * We need to sync out all in-flight IO before we try to evict
	 * (the dataset evict func is trying to clear the cached entries
	 * for this dataset in the ARC).
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		uint64_t count;

		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}

void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
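 *
 * That is, as computed below:
 *	unique = used - (mrs_used - deadlist_used)
 */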
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!dsl_dataset_is_snapshot(ds));

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
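
/*
 * Context and callback for the traverse_dataset() walk used by
 * dsl_dataset_destroy_sync(): intent-log blocks carry no dataset
 * accounting and are freed directly; everything else goes through
 * dsl_dataset_block_kill().
 */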
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}
static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * it.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them; try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point.  However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}
struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}

static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
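
/*
 * The sync-task half of dataset destruction: re-links the snapshot
 * chain, fixes up deadlists, clone bookkeeping, and space accounting,
 * and finally frees the on-disk dsl_dataset object.
 */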
void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(ds->ds_owner);
	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	if (dsda->defer) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		if (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1) {
			dmu_buf_will_dirty(ds->ds_dbuf, tx);
			ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
			return;
		}
	}

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_prop_setarg_t psa;
		uint64_t value = 0;

		dsl_prop_setarg_init_uint64(&psa, "refreservation",
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    &value);
		psa.psa_effective_value = 0;	/* predict default value */

		dsl_dataset_set_reservation_sync(ds, &psa, tx);
		ASSERT3U(ds->ds_reserved, ==, 0);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;

			/*
			 * If the clone's origin has no other clones, no
			 * user holds, and has been marked for deferred
			 * deletion, then we should have done the necessary
			 * destroy setup for it.
			 */
			if (ds_prev->ds_phys->ds_num_children == 1 &&
			    ds_prev->ds_userrefs == 0 &&
			    DS_IS_DEFER_DESTROY(ds_prev)) {
				ASSERT3P(dsda->rm_origin, !=, NULL);
			} else {
				ASSERT3P(dsda->rm_origin, ==, NULL);
			}
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	if (dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_t *ds_next;
		uint64_t old_unique;
		uint64_t used = 0, comp = 0, uncomp = 0;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = ds_next->ds_phys->ds_unique_bytes;

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

		if (ds_next->ds_deadlist.dl_oldfmt) {
			process_old_deadlist(ds, ds_prev, ds_next,
			    after_branch_point, tx);
		} else {
			/* Adjust prev's unique space. */
			if (ds_prev && !after_branch_point) {
				dsl_deadlist_space_range(&ds_next->ds_deadlist,
				    ds_prev->ds_phys->ds_prev_snap_txg,
				    ds->ds_phys->ds_prev_snap_txg,
				    &used, &comp, &uncomp);
				ds_prev->ds_phys->ds_unique_bytes += used;
			}

			/* Adjust snapused. */
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
			    &used, &comp, &uncomp);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
			    -used, -comp, -uncomp, tx);

			/* Move blocks to be freed to pool's free list. */
			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
			    tx);
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, comp, uncomp, tx);
			dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);

			/* Merge our deadlist into next's and free it. */
			dsl_deadlist_merge(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_deadlist_obj, tx);
		}
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

		/* Collapse range in clone heads */
		dsl_dataset_remove_clones_key(ds,
		    ds->ds_phys->ds_creation_txg, tx);

		if (dsl_dataset_is_snapshot(ds_next)) {
			dsl_dataset_t *ds_nextnext;

			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (i.e. be on the snap after next's
			 * deadlist).
			 */
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_nextnext));
			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
			ds_next->ds_phys->ds_unique_bytes += used;
			dsl_dataset_rele(ds_nextnext, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);

			/* Collapse range in this head. */
			dsl_dataset_t *hds;
			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
			    FTAG, &hds));
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    ds->ds_phys->ds_creation_txg, tx);
			dsl_dataset_rele(hds, FTAG);
		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir,
				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * NB: this should be very quick, because we already
		 * freed all the objects in open context.
		 */
		ka.ds = ds;
		ka.tx = tx;
		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == 0);

		if (ds->ds_prev != NULL) {
			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
				VERIFY3U(0, ==, zap_remove_int(mos,
				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
				    ds->ds_object, tx));
			}
			dsl_dataset_rele(ds->ds_prev, ds);
			ds->ds_prev = ds_prev = NULL;
		}
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for a conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the dataset's name is not too long.  Name consists
	 * of the dataset's length + 1 for the @-sign + snapshot name's length.
	 */
	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
		return (ENAMETOOLONG);

	err = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (err)
		return (err);

	ds->ds_trysnap_txg = tx->tx_txg;
	return (0);
}
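
/*
 * Sync task that actually takes the snapshot: it copies the head
 * dataset's dsl_dataset_phys_t into a new snapshot object, clones the
 * head's deadlist, and splices the snapshot into the chain.
 */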
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_objset, zio, tx);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    ds->ds_phys->ds_unique_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    ds->ds_object);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    ds->ds_userrefs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

	if (ds->ds_phys->ds_next_snap_obj) {
		/*
		 * This is a snapshot; override the dd's space used with
		 * our unique space and compression ratio.
		 */
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
		    ds->ds_phys->ds_unique_bytes);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
		    ds->ds_phys->ds_compressed_bytes));
	}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	} else {
		stat->dds_origin[0] = '\0';
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = ds->ds_phys->ds_used_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));
	if (ds->ds_prev == NULL)
		return (B_FALSE);
	if (ds->ds_phys->ds_bp.blk_birth >
	    ds->ds_prev->ds_phys->ds_creation_txg)
		return (B_TRUE);
	return (B_FALSE);
}
/* ARGSUSED */
static int
dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_dataset_t *hds;
	uint64_t val;
	int err;

	err = dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
	if (err)
		return (err);

	/* new name better not be in use */
	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
	dsl_dataset_rele(hds, FTAG);

	if (err == 0)
		err = EEXIST;
	else if (err == ENOENT)
		err = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
		err = ENAMETOOLONG;

	return (err);
}
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT3U(err, ==, 0);
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}
struct renamesnaparg {
	dsl_sync_task_group_t *dstg;
	char failed[MAXPATHLEN];
	char *oldsnap;
	char *newsnap;
};
static int
dsl_snapshot_rename_one(const char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *snapname;
	int err;

	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
	if (err != 0) {
		strfree(snapname);
		return (err == ENOENT ? 0 : err);
	}

#ifdef _KERNEL
	/*
	 * For each filesystem undergoing rename, we need to unmount the
	 * snapshot first.
	 */
	(void) zfs_unmount_snap(snapname, NULL);
#endif
	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
	strfree(snapname);
	if (err != 0)
		return (err == ENOENT ? 0 : err);

	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	return (0);
}
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname) + 1;

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';

	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len);

	if (err == 0)
		err = dsl_sync_task_group_wait(ra->dstg);

	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
			(void) strlcat(ra->failed, ra->newsnap,
			    sizeof (ra->failed));
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	if (err)
		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}
static int
dsl_valid_rename(const char *oldname, void *arg)
{
	int delta = *(int *)arg;

	if (strlen(oldname) + delta >= MAXNAMELEN)
		return (ENAMETOOLONG);

	return (0);
}
#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);

	if (tail == NULL) {
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (err == 0)
			err = dsl_dir_rename(dd, newname);
		dsl_dir_close(dd, FTAG);
		return (err);
	}

	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* new name must be snapshot in same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (recursive) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

struct promotearg {
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin;
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static boolean_t snaplist_unstable(list_t *l);
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;
	uint64_t unused;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &pa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
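	/*
	 * Hypothetical worked example of the simplification above (all
	 * numbers made up): three snapshots with (u2, k2) = (30, 5),
	 * (u1, k1) = (20, 3), (u0, k0) = (10, 2) give
	 * (30 - 20 + 5) + (20 - 10 + 3) + (10 - 0 + 2) = 15 + 13 + 12 = 40,
	 * which equals u2 + k2 + k1 + k0 = 30 + 5 + 3 + 2 = 40, as claimed.
	 */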
	pa->used = origin_ds->ds_phys->ds_used_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			err = EEXIST;
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
out:
	pa->err_ds = snap->ds->ds_snapname;
	return (err);
}
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    origin_ds->ds_phys->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    odd->dd_phys->dd_clones, hds->ds_object, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    hds->ds_object, tx));

		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    origin_head->ds_object, tx));
		if (dd->dd_phys->dd_clones == 0) {
			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));
	}

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}
		/* move snap name entry */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (ds->ds_phys->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;

				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
				    odd->dd_phys->dd_clones, o, tx), ==, 0);
				VERIFY3U(zap_add_int(dp->dp_meta_objset,
				    dd->dd_phys->dd_clones, o, tx), ==, 0);
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */
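	/*
	 * Illustrative, made-up numbers: if pa->cloneusedsnap == 300 and
	 * the clone's current dd_used_breakdown[DD_USED_SNAP] == 100, then
	 * delta == +200; the code below credits that 200 to DD_USED_SNAP
	 * and the remaining pa->used - 200 to DD_USED_HEAD.
	 */
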
	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
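/*
 * Hypothetical example: for snapshots A -> B -> C (in creation order),
 * snaplist_make(dp, B_FALSE, A, C, &l) builds the list C, B; A itself
 * is excluded, and list_head(&l) returns the node for C.
 */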
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			return (err);
		}

		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}
static void
snaplist_destroy(list_t *l, boolean_t own)
{
	struct promotenode *snap;

	if (!l || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		if (own)
			dsl_dataset_disown(snap->ds, snaplist_tag);
		else
			dsl_dataset_rele(snap->ds, snaplist_tag);
		kmem_free(snap, sizeof (struct promotenode));
	}
	list_destroy(l);
}
/*
 * Promote a clone.  Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which is originally the clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 * NULL, indicating that the clone is not a clone of a clone).
 */
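/*
 * Hypothetical pool layout matching the nomenclature above (all names
 * made up for illustration):
 *
 *	ohds:		pool/fs		(origin's head filesystem)
 *	ods:		pool/fs@snap	(the origin snapshot)
 *	cds:		pool/clone	(clone of pool/fs@snap, being promoted)
 *	origin origin:	pool/other@s	(only if pool/fs is itself a clone)
 */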
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa = { 0 };
	struct promotenode *snap;
	int err;

	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (EINVAL);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);

	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
	    &pa.shared_snaps);
	if (err != 0)
		goto out;

	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
	if (err != 0)
		goto out;

	snap = list_head(&pa.shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
	if (err != 0)
		goto out;

	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
		    FTAG, &pa.origin_origin);
		if (err != 0)
			goto out;
	}
out:
	rw_exit(&dp->dp_config_rwlock);

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 */
	if (err == 0) {
		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
		    dsl_dataset_promote_sync, ds, &pa,
		    2 + 2 * doi.doi_physical_blocks_512);
		if (err && pa.err_ds && conflsnap)
			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
	}

	snaplist_destroy(&pa.shared_snaps, B_TRUE);
	snaplist_destroy(&pa.clone_snaps, B_FALSE);
	snaplist_destroy(&pa.origin_snaps, B_FALSE);
	if (pa.origin_origin)
		dsl_dataset_rele(pa.origin_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force;
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
/* ARGSUSED */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone (unless they are unrelated) */
	if (csa->cds->ds_prev != NULL &&
	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
	    csa->ohds->ds_object !=
	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/* adjust amount of any unconsumed refreservation */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	if (csa->ohds->ds_quota != 0 &&
	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
		return (EDQUOT);

	return (0);
}
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->ohds->ds_quota == 0 ||
	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

	if (csa->cds->ds_objset != NULL) {
		dmu_objset_evict(csa->cds->ds_objset);
		csa->cds->ds_objset = NULL;
	}

	if (csa->ohds->ds_objset != NULL) {
		dmu_objset_evict(csa->ohds->ds_objset);
		csa->ohds->ds_objset = NULL;
	}

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (csa->cds->ds_prev) {
		dsl_dataset_t *origin = csa->cds->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&csa->cds->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&csa->ohds->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
	    csa->cds->ds_phys->ds_used_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&csa->cds->ds_deadlist);
	dsl_deadlist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj);

	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
/*
 * Swap 'clone' with its origin head dataset.  Used at the end of "zfs
 * recv" into an existing fs to swizzle the file system to the new
 * version, and by "zfs rollback".  Can also be used to swap two
 * independent head datasets if neither has any snapshots.
 */
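/*
 * Hypothetical caller sketch; "recv_clone" and "existing_head" are
 * made-up names for the temporary receive clone and the filesystem
 * being received into.  Both must already be owned, per the ASSERTs
 * below:
 *
 *	error = dsl_dataset_clone_swap(recv_clone, existing_head, force);
 */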
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/* Need exclusive access for the swap */
	rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
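/*
 * Hypothetical usage sketch; "buf" must be at least MAXNAMELEN bytes:
 *
 *	char buf[MAXNAMELEN];
 *	if (dsl_dsobj_to_dsname("tank", dsobj, buf) == 0)
 *		dprintf("dataset %llu is %s\n", (u_longlong_t)dsobj, buf);
 */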
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	dsl_dataset_t *ds = NULL;
	dsl_pool_t *dp;
	spa_t *spa;
	int error;

	if ((error = spa_open(pname, &spa, FTAG)) != 0)
		return (error);
	dp = spa_get_dsl(spa);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	spa_close(spa, FTAG);

	return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
static int
dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
		return (ENOTSUP);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
	    psa->psa_effective_value < ds->ds_reserved)
		return (ENOSPC);

	return (0);
}
extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);

void
dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	if (ds->ds_quota != effective_value) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = effective_value;

		spa_history_log_internal(LOG_DS_REFQUOTA,
		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
		    (longlong_t)ds->ds_quota, ds->ds_object);
	}
}
int
dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	/*
	 * If someone removes a file, then tries to set the quota, we
	 * want to make sure the file freeing takes effect.
	 */
	txg_wait_open(ds->ds_dir->dd_pool, 0);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
	    ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t unique;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = ds->ds_phys->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, effective_value) -
		    MAX(unique, ds->ds_reserved);

		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
			return (ENOSPC);
		if (ds->ds_quota > 0 &&
		    effective_value > ds->ds_quota)
			return (ENOSPC);
	}

	return (0);
}
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = ds->ds_phys->ds_unique_bytes;
	delta = MAX(0, (int64_t)(effective_value - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = effective_value;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);

	spa_history_log_internal(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, ds->ds_object);
}
int
dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
	    &reservation);

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_reservation_check,
	    dsl_dataset_set_reservation_sync, ds, &psa, 0);

	dsl_dataset_rele(ds, FTAG);
	return (err);
}
struct dsl_ds_holdarg {
	dsl_sync_task_group_t *dstg;
	char *htag;
	char *snapname;
	boolean_t recursive;
	boolean_t gotone;
	boolean_t temphold;
	char failed[MAXPATHLEN];
};
/*
 * The max length of a temporary tag prefix is the number of hex digits
 * required to express UINT64_MAX plus one for the hyphen.
 */
#define	MAX_TAG_PREFIX_LEN	17
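/* That is: UINT64_MAX is 0xFFFFFFFFFFFFFFFF, 16 hex digits, so 16 + 1 == 17. */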
static int
dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int error = 0;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	if (!dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	/* tags must be unique */
	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj) {
		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
		    8, 1, tx);
		if (error == 0)
			error = EEXIST;
		else if (error == ENOENT)
			error = 0;
	}
	mutex_exit(&ds->ds_lock);

	if (error == 0 && ha->temphold &&
	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
		error = E2BIG;

	return (error);
}
void
dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t now = gethrestime_sec();
	uint64_t zapobj;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/*
		 * This is the first user hold for this dataset.  Create
		 * the userrefs zap object.
		 */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;
	mutex_exit(&ds->ds_lock);

	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));

	if (ha->temphold) {
		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
		    htag, &now, tx));
	}

	spa_history_log_internal(LOG_DS_USER_HOLD,
	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
	    (int)ha->temphold, ds->ds_object);
}
static int
dsl_dataset_user_hold_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	dsl_dataset_t *ds;
	int error;
	char *name;

	/* alloc a buffer to hold dsname@snapname plus terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, ha->dstg, &ds);
	strfree(name);
	if (error == 0) {
		ha->gotone = B_TRUE;
		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
		    dsl_dataset_user_hold_sync, ds, ha, 0);
	} else if (error == ENOENT && ha->recursive) {
		error = 0;
	} else {
		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	}
	return (error);
}
int
dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
    boolean_t recursive, boolean_t temphold)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	ha->temphold = temphold;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_hold_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;

		if (dst->dst_err) {
			dsl_dataset_name(ds, ha->failed);
			*strchr(ha->failed, '@') = '\0';
		}
		dsl_dataset_rele(ds, ha->dstg);
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);
	return (error);
}
struct dsl_ds_releasearg {
	dsl_dataset_t *ds;
	const char *htag;
	boolean_t own;		/* do we own or just hold ds? */
};
static int
dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
    boolean_t *might_destroy)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t zapobj;
	uint64_t tmp;
	int error;

	*might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	if (zapobj == 0) {
		/* The tag can't possibly exist */
		mutex_exit(&ds->ds_lock);
		return (ESRCH);
	}

	/* Make sure the tag exists */
	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
	if (error) {
		mutex_exit(&ds->ds_lock);
		if (error == ENOENT)
			error = ESRCH;
		return (error);
	}

	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds))
		*might_destroy = B_TRUE;

	mutex_exit(&ds->ds_lock);
	return (0);
}
static int
dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	boolean_t might_destroy;
	int error;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
	if (error)
		return (error);

	if (might_destroy) {
		struct dsl_ds_destroyarg dsda = {0};

		if (dmu_tx_is_syncing(tx)) {
			/*
			 * If we're not prepared to remove the snapshot,
			 * we can't allow the release to happen right now.
			 */
			if (!ra->own)
				return (EBUSY);
		}
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		return (dsl_dataset_destroy_check(&dsda, tag, tx));
	}

	return (0);
}
static void
dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	uint64_t dsobj = ds->ds_object;
	uint64_t refs;
	int error;

	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	mutex_enter(&ds->ds_lock);
	ds->ds_userrefs--;
	refs = ds->ds_userrefs;
	mutex_exit(&ds->ds_lock);
	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
	VERIFY(error == 0 || error == ENOENT);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds)) {
		struct dsl_ds_destroyarg dsda = {0};

		ASSERT(ra->own);
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		/* We already did the destroy_check */
		dsl_dataset_destroy_sync(&dsda, tag, tx);
	}

	spa_history_log_internal(LOG_DS_USER_RELEASE,
	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
	    ra->htag, (longlong_t)refs, dsobj);
}
static int
dsl_dataset_user_release_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	struct dsl_ds_releasearg *ra;
	dsl_dataset_t *ds;
	int error;
	void *dtag = ha->dstg;
	char *name;
	boolean_t own = B_FALSE;
	boolean_t might_destroy;

	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, dtag, &ds);
	strfree(name);
	if (error == ENOENT && ha->recursive)
		return (0);
	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	if (error)
		return (error);

	ha->gotone = B_TRUE;

	ASSERT(dsl_dataset_is_snapshot(ds));

	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
	if (error) {
		dsl_dataset_rele(ds, dtag);
		return (error);
	}

	if (might_destroy) {
#ifdef _KERNEL
		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
		error = zfs_unmount_snap(name, NULL);
		strfree(name);
		if (error) {
			dsl_dataset_rele(ds, dtag);
			return (error);
		}
#endif
		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
			dsl_dataset_rele(ds, dtag);
			return (EBUSY);
		} else {
			own = B_TRUE;
			dsl_dataset_make_exclusive(ds, dtag);
		}
	}

	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
	ra->ds = ds;
	ra->htag = ha->htag;
	ra->own = own;
	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, ra, dtag, 0);

	return (0);
}
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup.  We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner.  Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY) {
		delay(1);
		goto top;
	}

	return (error);
}
/*
 * Called at spa_load time to release a stale temporary user hold.
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	rw_exit(&dp->dp_config_rwlock);
	if (error)
		return (error);
	namelen = dsl_dataset_namelen(ds)+1;
	name = kmem_alloc(namelen, KM_SLEEP);
	dsl_dataset_name(ds, name);
	dsl_dataset_rele(ds, FTAG);

	snap = strchr(name, '@');
	*snap = '\0';
	++snap;
	return (dsl_dataset_user_release(name, snap, htag, B_FALSE));
}
int
dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
	if (ds->ds_phys->ds_userrefs_obj != 0) {
		zap_attribute_t *za;
		zap_cursor_t zc;

		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
		    ds->ds_phys->ds_userrefs_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
			    za->za_first_integer));
		}
		zap_cursor_fini(&zc);
		kmem_free(za, sizeof (zap_attribute_t));
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
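/*
 * Hypothetical call-site sketch ("tank" is a made-up pool name; the real
 * caller walks the pool's datasets at load time):
 *
 *	(void) dmu_objset_find("tank", dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */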
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
		if (DS_IS_INCONSISTENT(ds))
			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
		else
			dsl_dataset_disown(ds, FTAG);
	}
	return (0);
}