/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
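
/*
 * Add the space usage of a newly born block to this dataset and push
 * the appropriate portion of the delta up to the dsl_dir layer.
 * Called from syncing context only.
 */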
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
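
/*
 * Release a block that this dataset no longer references: free it
 * immediately if it was born after the most recent snapshot, otherwise
 * move it to the dataset's deadlist.  Returns the space released.
 */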
uint64_t
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	ASSERT(ds != NULL);
	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}
boolean_t
dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
    uint64_t blk_birth)
{
	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
		return (B_FALSE);

	ddt_prefetch(dsl_dataset_get_spa(ds), bp);

	return (B_TRUE);
}
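
/*
 * dmu-buffer eviction callback: the last hold on the dataset's dbuf
 * has been released, so tear down the in-core dsl_dataset_t.
 */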
/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
static int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);
	return (err);
}
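
/*
 * Look up object dsobj in the MOS and take a reference on its
 * dsl_dataset_t, constructing the in-core state on first use.
 */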
static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_type != DMU_OT_DSL_DATASET)
		return (EINVAL);

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		rw_init(&ds->ds_rwlock, 0, 0, 0);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		bplist_create(&ds->ds_pending_deadlist);
		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);

		err = dsl_dir_open_obj(dp,
		    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		if (err) {
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    ds->ds_phys->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0)
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);

		if (err || winner) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_recvlock);
			mutex_destroy(&ds->ds_opening_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock. And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * in that order.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
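
/*
 * Take both a reference and the reader lock on the dataset named by
 * dsobj (see dsl_dataset_get_ref() and dsl_dataset_hold_ref() above).
 */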
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
int
dsl_dataset_own(const char *name, boolean_t inconsistentok,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, tag, dsp);
	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (EBUSY);
	}
	return (0);
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}
static int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = tag;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}
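
/*
 * Upgrade the current owner's hold to exclusive (WRITER) access,
 * blocking out all other users of the dataset.
 */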
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}
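
/*
 * Allocate and initialize the on-disk state for a new dataset in
 * directory dd, cloning from origin if one is given (or from
 * dp_origin_snap by default).  Returns the new dataset's object number.
 */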
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
struct destroyarg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *failed;
	boolean_t defer;
};

static int
dsl_snapshot_destroy_one(const char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	int err;
	char *dsname;

	dsname = kmem_asprintf("%s@%s", name, da->snapname);
	err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
	strfree(dsname);
	if (err == 0) {
		struct dsl_ds_destroyarg *dsda;

		dsl_dataset_make_exclusive(ds, da->dstg);
		dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
		dsda->ds = ds;
		dsda->defer = da->defer;
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, dsda, da->dstg, 0);
	} else if (err == ENOENT) {
		err = 0;
	} else {
		(void) strcpy(da->failed, name);
	}
	return (err);
}
/*
 * Destroy 'snapname' in all descendants of 'fsname'.
 */
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
int
dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;
	da.defer = defer;

	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, da.dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}
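
/*
 * Return TRUE if destroying this snapshot's last remaining clone would
 * also destroy the snapshot itself: it must be marked for deferred
 * destroy, have no user holds, and have exactly two children left
 * (its next snapshot plus that one clone).
 */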
static boolean_t
dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
{
	boolean_t might_destroy = B_FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
	    DS_IS_DEFER_DESTROY(ds))
		might_destroy = B_TRUE;
	mutex_exit(&ds->ds_lock);

	return (might_destroy);
}
/*
 * If we're removing a clone, and these three conditions are true:
 *	1) the clone's origin has no other children
 *	2) the clone's origin has no user references
 *	3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);
#ifdef _KERNEL
		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
#endif
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}

	return (0);
}
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t dummy_ds = { 0 };

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds.ds_dir = dd;
	dummy_ds.ds_object = ds->ds_object;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_from_ds(ds, &os);
	if (err)
		goto out;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}
	if (err != ESRCH)
		goto out;

	/*
	 * Only the ZIL knows how to free log blocks.
	 */
	zil_destroy(dmu_objset_zil(os), B_FALSE);

	/*
	 * Sync out all in-flight IO.
	 */
	txg_wait_synced(dd->dd_pool, 0);

	/*
	 * If we managed to free all the objects in open
	 * context, the user space accounting should be zero.
	 */
	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
	    dmu_objset_userused_enabled(os)) {
		uint64_t count;

		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
		    count == 0);
		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
		    count == 0);
	}

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}

void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
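
/*
 * For example (an illustrative sketch): if the head currently uses 10G,
 * the most recent snapshot used 6G when it was taken, and 2G of that
 * snapshot's blocks have since been freed (and are now on the head's
 * deadlist), then 4G of the snapshot is still in use, so the head's
 * unique space is 10G - (6G - 2G) = 6G.
 */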
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!dsl_dataset_is_snapshot(ds));

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}
static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}
/*
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point. However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}
struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}
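
/*
 * Remove the deadlist key mintxg from every clone whose branch point
 * is after that txg, recursing through the dd_clones hierarchy.
 */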
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};
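
/*
 * bpobj_iterate() callback used when destroying a snapshot whose
 * deadlists are in the old format: keep each block on our own deadlist
 * if it predates our previous snapshot, otherwise free it and tally
 * the space it used.
 */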
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	boolean_t wont_destroy;
	uint64_t obj;

	wont_destroy = (dsda->defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));

	ASSERT(ds->ds_owner || wont_destroy);
	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	if (wont_destroy) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		return;
	}

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_prop_setarg_t psa;
		uint64_t value = 0;

		dsl_prop_setarg_init_uint64(&psa, "refreservation",
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    &value);
		psa.psa_effective_value = 0;	/* predict default value */

		dsl_dataset_set_reservation_sync(ds, &psa, tx);
		ASSERT3U(ds->ds_reserved, ==, 0);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;

			/*
			 * If the clone's origin has no other clones, no
			 * user holds, and has been marked for deferred
			 * deletion, then we should have done the necessary
			 * destroy setup for it.
			 */
			if (ds_prev->ds_phys->ds_num_children == 1 &&
			    ds_prev->ds_userrefs == 0 &&
			    DS_IS_DEFER_DESTROY(ds_prev)) {
				ASSERT3P(dsda->rm_origin, !=, NULL);
			} else {
				ASSERT3P(dsda->rm_origin, ==, NULL);
			}
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	if (dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_t *ds_next;
		uint64_t old_unique;
		uint64_t used = 0, comp = 0, uncomp = 0;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = ds_next->ds_phys->ds_unique_bytes;

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

		if (ds_next->ds_deadlist.dl_oldfmt) {
			process_old_deadlist(ds, ds_prev, ds_next,
			    after_branch_point, tx);
		} else {
			/* Adjust prev's unique space. */
			if (ds_prev && !after_branch_point) {
				dsl_deadlist_space_range(&ds_next->ds_deadlist,
				    ds_prev->ds_phys->ds_prev_snap_txg,
				    ds->ds_phys->ds_prev_snap_txg,
				    &used, &comp, &uncomp);
				ds_prev->ds_phys->ds_unique_bytes += used;
			}

			/* Adjust snapused. */
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
			    &used, &comp, &uncomp);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
			    -used, -comp, -uncomp, tx);

			/* Move blocks to be freed to pool's free list. */
			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
			    tx);
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, comp, uncomp, tx);
			dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);

			/* Merge our deadlist into next's and free it. */
			dsl_deadlist_merge(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_deadlist_obj, tx);
		}
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

		/* Collapse range in clone heads */
		dsl_dataset_remove_clones_key(ds,
		    ds->ds_phys->ds_creation_txg, tx);

		if (dsl_dataset_is_snapshot(ds_next)) {
			dsl_dataset_t *ds_nextnext;

			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (ie. be on the snap after next's
			 * deadlist).
			 */
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_nextnext));
			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
			ds_next->ds_phys->ds_unique_bytes += used;
			dsl_dataset_rele(ds_nextnext, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);

			/* Collapse range in this head. */
			dsl_dataset_t *hds;
			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
			    FTAG, &hds));
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    ds->ds_phys->ds_creation_txg, tx);
			dsl_dataset_rele(hds, FTAG);
		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir,
				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * NB: this should be very quick, because we already
		 * freed all the objects in open context.
		 */
		ka.ds = ds;
		ka.tx = tx;
		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    ds->ds_phys->ds_unique_bytes == 0);

		if (ds->ds_prev != NULL) {
			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
				VERIFY3U(0, ==, zap_remove_int(mos,
				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
				    ds->ds_object, tx));
			}
			dsl_dataset_rele(ds->ds_prev, ds);
			ds->ds_prev = ds_prev = NULL;
		}
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}
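
/*
 * Make sure there is enough free space for the blocks that the pending
 * snapshot will pin: any unique blocks covered by the refreservation
 * must be accommodated outside of it.
 */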
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for a conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the dataset's name is not too long.  Name consists
	 * of the dataset's length + 1 for the @-sign + snapshot name's length
	 */
	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
		return (ENAMETOOLONG);

	err = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (err)
		return (err);

	ds->ds_trysnap_txg = tx->tx_txg;
	return (0);
}
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_objset, zio, tx);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    ds->ds_phys->ds_unique_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    ds->ds_object);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    ds->ds_userrefs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

	if (ds->ds_phys->ds_next_snap_obj) {
		/*
		 * This is a snapshot; override the dd's space used with
		 * our unique space and compression ratio.
		 */
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
		    ds->ds_phys->ds_unique_bytes);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
		    ds->ds_phys->ds_compressed_bytes));
	}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	} else {
		stat->dds_is_snapshot = B_FALSE;
		stat->dds_num_clones = 0;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	} else {
		stat->dds_origin[0] = '\0';
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}
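
/*
 * Report the space referenced by this dataset, and the space and
 * objects still available to it, adjusted for refreservation and
 * refquota.
 */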
void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = ds->ds_phys->ds_used_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
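
/*
 * Return TRUE if this dataset has changed since its most recent
 * snapshot, ignoring a difference in the ZIL alone.
 */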
2248 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2250 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2252 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2253 dsl_pool_sync_context(dp));
2254 if (ds->ds_prev == NULL)
2256 if (ds->ds_phys->ds_bp.blk_birth >
2257 ds->ds_prev->ds_phys->ds_creation_txg) {
2258 objset_t *os, *os_prev;
2260 * It may be that only the ZIL differs, because it was
2261 * reset in the head. Don't count that as being
2262 * modified.
2264 if (dmu_objset_from_ds(ds, &os) != 0)
2266 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2268 return (bcmp(&os->os_phys->os_meta_dnode,
2269 &os_prev->os_phys->os_meta_dnode,
2270 sizeof (os->os_phys->os_meta_dnode)) != 0);
2277 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2279 dsl_dataset_t *ds = arg1;
2280 char *newsnapname = arg2;
2281 dsl_dir_t *dd = ds->ds_dir;
2286 err = dsl_dataset_hold_obj(dd->dd_pool,
2287 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2291 /* new name better not be in use */
2292 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2293 dsl_dataset_rele(hds, FTAG);
2297 else if (err == ENOENT)
2300 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2301 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2308 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2310 dsl_dataset_t *ds = arg1;
2311 const char *newsnapname = arg2;
2312 dsl_dir_t *dd = ds->ds_dir;
2313 objset_t *mos = dd->dd_pool->dp_meta_objset;
2317 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2319 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2320 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2322 VERIFY(0 == dsl_dataset_get_snapname(ds));
2323 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2324 ASSERT3U(err, ==, 0);
2325 mutex_enter(&ds->ds_lock);
2326 (void) strcpy(ds->ds_snapname, newsnapname);
2327 mutex_exit(&ds->ds_lock);
2328 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2329 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2330 ASSERT3U(err, ==, 0);
2332 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2333 "dataset = %llu", ds->ds_object);
2334 dsl_dataset_rele(hds, FTAG);
2337 struct renamesnaparg {
2338 dsl_sync_task_group_t *dstg;
2339 char failed[MAXPATHLEN];
2345 dsl_snapshot_rename_one(const char *name, void *arg)
2347 struct renamesnaparg *ra = arg;
2348 dsl_dataset_t *ds = NULL;
2352 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2353 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2356 * For recursive snapshot renames the parent won't be changing
2357 * so we just pass name for both the to/from arguments.
2359 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2362 return (err == ENOENT ? 0 : err);
2367 * For all filesystems undergoing rename, we'll need to unmount them.
2369 (void) zfs_unmount_snap(snapname, NULL);
2371 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2374 return (err == ENOENT ? 0 : err);
2376 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2377 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2383 dsl_recursive_rename(char *oldname, const char *newname)
2386 struct renamesnaparg *ra;
2387 dsl_sync_task_t *dst;
2389 char *cp, *fsname = spa_strdup(oldname);
2390 int len = strlen(oldname) + 1;
2392 /* truncate the snapshot name to get the fsname */
2393 cp = strchr(fsname, '@');
2396 err = spa_open(fsname, &spa, FTAG);
2398 kmem_free(fsname, len);
2401 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2402 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2404 ra->oldsnap = strchr(oldname, '@') + 1;
2405 ra->newsnap = strchr(newname, '@') + 1;
2408 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2410 kmem_free(fsname, len);
2413 err = dsl_sync_task_group_wait(ra->dstg);
2416 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2417 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2418 dsl_dataset_t *ds = dst->dst_arg1;
2420 dsl_dir_name(ds->ds_dir, ra->failed);
2421 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2422 (void) strlcat(ra->failed, ra->newsnap,
2423 sizeof (ra->failed));
2425 dsl_dataset_rele(ds, ra->dstg);
2429 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2431 dsl_sync_task_group_destroy(ra->dstg);
2432 kmem_free(ra, sizeof (struct renamesnaparg));
2433 spa_close(spa, FTAG);
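/*
 * A sketch of how this path is reached (command-line form given for
 * illustration): "zfs rename -r tank/fs@old tank/fs@new" renames
 * tank/fs@old and the matching @old snapshot of every descendant
 * filesystem, one sync task per snapshot, in a single task group.
 */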
2438 dsl_valid_rename(const char *oldname, void *arg)
2440 int delta = *(int *)arg;
2442 if (strlen(oldname) + delta >= MAXNAMELEN)
2443 return (ENAMETOOLONG);
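/*
 * Example (names hypothetical): renaming tank/a to tank/alpha gives
 * delta = 4, so every descendant name such as tank/a/child@snap must
 * still fit in MAXNAMELEN once it grows by those 4 bytes.
 */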
2448 #pragma weak dmu_objset_rename = dsl_dataset_rename
2450 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2457 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2462 int delta = strlen(newname) - strlen(oldname);
2464 /* if we're growing, validate child name lengths */
2466 err = dmu_objset_find(oldname, dsl_valid_rename,
2467 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2470 err = dsl_dir_rename(dd, newname);
2471 dsl_dir_close(dd, FTAG);
2475 if (tail[0] != '@') {
2476 /* the name ended in a nonexistent component */
2477 dsl_dir_close(dd, FTAG);
2481 dsl_dir_close(dd, FTAG);
2483 /* new name must be a snapshot in the same filesystem */
2484 tail = strchr(newname, '@');
2488 if (strncmp(oldname, newname, tail - newname) != 0)
2492 err = dsl_recursive_rename(oldname, newname);
2494 err = dsl_dataset_hold(oldname, FTAG, &ds);
2498 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2499 dsl_dataset_snapshot_rename_check,
2500 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2502 dsl_dataset_rele(ds, FTAG);
2508 struct promotenode {
2514 list_t shared_snaps, origin_snaps, clone_snaps;
2515 dsl_dataset_t *origin_origin;
2516 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2520 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2521 static boolean_t snaplist_unstable(list_t *l);
2524 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2526 dsl_dataset_t *hds = arg1;
2527 struct promotearg *pa = arg2;
2528 struct promotenode *snap = list_head(&pa->shared_snaps);
2529 dsl_dataset_t *origin_ds = snap->ds;
2533 /* Check that it is a real clone */
2534 if (!dsl_dir_is_clone(hds->ds_dir))
2537 /* Since this is so expensive, don't do the preliminary check */
2538 if (!dmu_tx_is_syncing(tx))
2541 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2544 /* compute origin's new unique space */
2545 snap = list_tail(&pa->clone_snaps);
2546 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2547 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2548 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2549 &pa->unique, &unused, &unused);
2552 * Walk the snapshots that we are moving
2554 * Compute space to transfer. Consider the incremental changes
2555 * to used for each snapshot:
2556 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2557 * So each snapshot gave birth to:
2558 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2559 * So a sequence would look like:
2560 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2561 * Which simplifies to:
2562 * uN + kN + kN-1 + ... + k1 + k0
2563 * Note however, if we stop before we reach the ORIGIN we get:
2564 * uN + kN + kN-1 + ... + kM - uM-1
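/*
 * Worked example (illustrative numbers only): with two snapshots where
 * u1 = 5G, k1 = 1G and k0 = 2G, the walk reaches the ORIGIN and
 * transfers u1 + k1 + k0 = 8G. If instead the walk stopped at
 * snapshot M = 1 of a clone-of-a-clone with u0 = 3G, the transfer
 * would be u1 + k1 - u0 = 3G.
 */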
2566 pa->used = origin_ds->ds_phys->ds_used_bytes;
2567 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2568 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2569 for (snap = list_head(&pa->shared_snaps); snap;
2570 snap = list_next(&pa->shared_snaps, snap)) {
2571 uint64_t val, dlused, dlcomp, dluncomp;
2572 dsl_dataset_t *ds = snap->ds;
2574 /* Check that the snapshot name does not conflict */
2575 VERIFY(0 == dsl_dataset_get_snapname(ds));
2576 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2584 /* The very first snapshot does not have a deadlist */
2585 if (ds->ds_phys->ds_prev_snap_obj == 0)
2588 dsl_deadlist_space(&ds->ds_deadlist,
2589 &dlused, &dlcomp, &dluncomp);
2592 pa->uncomp += dluncomp;
2596 * If we are a clone of a clone then we never reached ORIGIN,
2597 * so we need to subtract out the clone origin's used space.
2599 if (pa->origin_origin) {
2600 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2601 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2602 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2605 /* Check that there is enough space here */
2606 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2612 * Compute the amounts of space that will be used by snapshots
2613 * after the promotion (for both origin and clone). For each,
2614 * it is the amount of space that will be on all of their
2615 * deadlists (that was not born before their new origin).
2617 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2621 * Note, typically this will not be a clone of a clone,
2622 * so dd_origin_txg will be < TXG_INITIAL, so
2623 * these snaplist_space() -> dsl_deadlist_space_range()
2624 * calls will be fast because they do not have to
2625 * iterate over all bps.
2627 snap = list_head(&pa->origin_snaps);
2628 err = snaplist_space(&pa->shared_snaps,
2629 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2633 err = snaplist_space(&pa->clone_snaps,
2634 snap->ds->ds_dir->dd_origin_txg, &space);
2637 pa->cloneusedsnap += space;
2639 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2640 err = snaplist_space(&pa->origin_snaps,
2641 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2648 pa->err_ds = snap->ds->ds_snapname;
2653 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2655 dsl_dataset_t *hds = arg1;
2656 struct promotearg *pa = arg2;
2657 struct promotenode *snap = list_head(&pa->shared_snaps);
2658 dsl_dataset_t *origin_ds = snap->ds;
2659 dsl_dataset_t *origin_head;
2660 dsl_dir_t *dd = hds->ds_dir;
2661 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2662 dsl_dir_t *odd = NULL;
2663 uint64_t oldnext_obj;
2666 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2668 snap = list_head(&pa->origin_snaps);
2669 origin_head = snap->ds;
2672 * We need to explicitly open odd, since origin_ds's dd will be
2673 * changing.
2675 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2678 /* change origin's next snap */
2679 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2680 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2681 snap = list_tail(&pa->clone_snaps);
2682 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2683 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2685 /* change the origin's next clone */
2686 if (origin_ds->ds_phys->ds_next_clones_obj) {
2687 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2688 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2689 origin_ds->ds_phys->ds_next_clones_obj,
2694 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2695 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2696 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2697 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2698 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2699 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2700 origin_head->ds_dir->dd_origin_txg =
2701 origin_ds->ds_phys->ds_creation_txg;
2703 /* change dd_clone entries */
2704 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2705 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2706 odd->dd_phys->dd_clones, hds->ds_object, tx));
2707 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2708 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2709 hds->ds_object, tx));
2711 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2712 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2713 origin_head->ds_object, tx));
2714 if (dd->dd_phys->dd_clones == 0) {
2715 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2716 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2718 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2719 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2723 /* move snapshots to this dir */
2724 for (snap = list_head(&pa->shared_snaps); snap;
2725 snap = list_next(&pa->shared_snaps, snap)) {
2726 dsl_dataset_t *ds = snap->ds;
2728 /* unregister props as dsl_dir is changing */
2729 if (ds->ds_objset) {
2730 dmu_objset_evict(ds->ds_objset);
2731 ds->ds_objset = NULL;
2733 /* move snap name entry */
2734 VERIFY(0 == dsl_dataset_get_snapname(ds));
2735 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2736 ds->ds_snapname, tx));
2737 VERIFY(0 == zap_add(dp->dp_meta_objset,
2738 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2739 8, 1, &ds->ds_object, tx));
2741 /* change containing dsl_dir */
2742 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2743 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2744 ds->ds_phys->ds_dir_obj = dd->dd_object;
2745 ASSERT3P(ds->ds_dir, ==, odd);
2746 dsl_dir_close(ds->ds_dir, ds);
2747 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2748 NULL, ds, &ds->ds_dir));
2750 /* move any clone references */
2751 if (ds->ds_phys->ds_next_clones_obj &&
2752 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2756 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2757 ds->ds_phys->ds_next_clones_obj);
2758 zap_cursor_retrieve(&zc, &za) == 0;
2759 zap_cursor_advance(&zc)) {
2760 dsl_dataset_t *cnds;
2763 if (za.za_first_integer == oldnext_obj) {
2765 * We've already moved the
2766 * origin's reference.
2771 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2772 za.za_first_integer, FTAG, &cnds));
2773 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2775 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2776 odd->dd_phys->dd_clones, o, tx), ==, 0);
2777 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2778 dd->dd_phys->dd_clones, o, tx), ==, 0);
2779 dsl_dataset_rele(cnds, FTAG);
2781 zap_cursor_fini(&zc);
2784 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2788 * Change space accounting.
2789 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2790 * both be valid, or both be 0 (resulting in delta == 0). This
2791 * is true for each of {clone,origin} independently.
2794 delta = pa->cloneusedsnap -
2795 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2796 ASSERT3S(delta, >=, 0);
2797 ASSERT3U(pa->used, >=, delta);
2798 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2799 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2800 pa->used - delta, pa->comp, pa->uncomp, tx);
2802 delta = pa->originusedsnap -
2803 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2804 ASSERT3S(delta, <=, 0);
2805 ASSERT3U(pa->used, >=, -delta);
2806 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2807 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2808 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2810 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2812 /* log history record */
2813 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2814 "dataset = %llu", hds->ds_object);
2816 dsl_dir_close(odd, FTAG);
2819 static char *snaplist_tag = "snaplist";
2821 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2822 * (exclusive) and last_obj (inclusive). The list will be in reverse
2823 * order (last_obj will be the list_head()). If first_obj == 0, do all
2824 * snapshots back to this dataset's origin.
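/*
 * Example (illustrative): for a snapshot chain A -> B -> C,
 * snaplist_make(dp, own, A, C, l) yields the list [C, B]; passing
 * first_obj == 0 would extend it through A back to the origin.
 */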
2827 snaplist_make(dsl_pool_t *dp, boolean_t own,
2828 uint64_t first_obj, uint64_t last_obj, list_t *l)
2830 uint64_t obj = last_obj;
2832 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2834 list_create(l, sizeof (struct promotenode),
2835 offsetof(struct promotenode, link));
2837 while (obj != first_obj) {
2839 struct promotenode *snap;
2843 err = dsl_dataset_own_obj(dp, obj,
2844 0, snaplist_tag, &ds);
2846 dsl_dataset_make_exclusive(ds, snaplist_tag);
2848 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2850 if (err == ENOENT) {
2851 /* lost race with snapshot destroy */
2852 struct promotenode *last = list_tail(l);
2853 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2854 obj = last->ds->ds_phys->ds_prev_snap_obj;
2861 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2863 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2865 list_insert_tail(l, snap);
2866 obj = ds->ds_phys->ds_prev_snap_obj;
2873 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2875 struct promotenode *snap;
2878 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2879 uint64_t used, comp, uncomp;
2880 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2881 mintxg, UINT64_MAX, &used, &comp, &uncomp);
2888 snaplist_destroy(list_t *l, boolean_t own)
2890 struct promotenode *snap;
2892 if (!l || !list_link_active(&l->list_head))
2895 while ((snap = list_tail(l)) != NULL) {
2896 list_remove(l, snap);
2898 dsl_dataset_disown(snap->ds, snaplist_tag);
2900 dsl_dataset_rele(snap->ds, snaplist_tag);
2901 kmem_free(snap, sizeof (struct promotenode));
2907 * Promote a clone. Nomenclature note:
2908 * "clone" or "cds": the original clone which is being promoted
2909 * "origin" or "ods": the snapshot which is originally clone's origin
2910 * "origin head" or "ohds": the dataset which is the head
2911 * (filesystem/volume) for the origin
2912 * "origin origin": the origin of the origin's filesystem (typically
2913 * NULL, indicating that the clone is not a clone of a clone).
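/*
 * Example (names hypothetical): if tank/clone was cloned from
 * tank/fs@snap1, then cds = tank/clone, ods = tank/fs@snap1,
 * ohds = tank/fs, and the origin origin is NULL unless tank/fs is
 * itself a clone.
 */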
2916 dsl_dataset_promote(const char *name, char *conflsnap)
2921 dmu_object_info_t doi;
2922 struct promotearg pa = { 0 };
2923 struct promotenode *snap;
2926 err = dsl_dataset_hold(name, FTAG, &ds);
2932 err = dmu_object_info(dp->dp_meta_objset,
2933 ds->ds_phys->ds_snapnames_zapobj, &doi);
2935 dsl_dataset_rele(ds, FTAG);
2939 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2940 dsl_dataset_rele(ds, FTAG);
2945 * We are going to inherit all the snapshots taken before our
2946 * origin (i.e., our new origin will be our parent's origin).
2947 * Take ownership of them so that we can rename them into our
2948 * namespace.
2950 rw_enter(&dp->dp_config_rwlock, RW_READER);
2952 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2957 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2961 snap = list_head(&pa.shared_snaps);
2962 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2963 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2964 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2968 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
2969 err = dsl_dataset_hold_obj(dp,
2970 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2971 FTAG, &pa.origin_origin);
2977 rw_exit(&dp->dp_config_rwlock);
2980 * Add in 128x the snapnames zapobj size, since we will be moving
2981 * a bunch of snapnames to the promoted ds, and dirtying their
2982 * bonus buffers.
2985 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2986 dsl_dataset_promote_sync, ds, &pa,
2987 2 + 2 * doi.doi_physical_blocks_512);
2988 if (err && pa.err_ds && conflsnap)
2989 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
2992 snaplist_destroy(&pa.shared_snaps, B_TRUE);
2993 snaplist_destroy(&pa.clone_snaps, B_FALSE);
2994 snaplist_destroy(&pa.origin_snaps, B_FALSE);
2995 if (pa.origin_origin)
2996 dsl_dataset_rele(pa.origin_origin, FTAG);
2997 dsl_dataset_rele(ds, FTAG);
3001 struct cloneswaparg {
3002 dsl_dataset_t *cds; /* clone dataset */
3003 dsl_dataset_t *ohds; /* origin's head dataset */
3005 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3010 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3012 struct cloneswaparg *csa = arg1;
3014 /* they should both be heads */
3015 if (dsl_dataset_is_snapshot(csa->cds) ||
3016 dsl_dataset_is_snapshot(csa->ohds))
3019 /* the branch point should be just before them */
3020 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3023 /* cds should be the clone (unless they are unrelated) */
3024 if (csa->cds->ds_prev != NULL &&
3025 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3026 csa->ohds->ds_object !=
3027 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3030 /* the clone should be a child of the origin */
3031 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3034 /* ohds shouldn't be modified unless 'force' */
3035 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3038 /* adjust amount of any unconsumed refreservation */
3039 csa->unused_refres_delta =
3040 (int64_t)MIN(csa->ohds->ds_reserved,
3041 csa->ohds->ds_phys->ds_unique_bytes) -
3042 (int64_t)MIN(csa->ohds->ds_reserved,
3043 csa->cds->ds_phys->ds_unique_bytes);
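/*
 * Worked example (illustrative numbers only): with a 10G
 * refreservation on ohds, 4G unique on ohds, and 7G unique on the
 * clone, the delta is MIN(10G, 4G) - MIN(10G, 7G) = -3G; the swap
 * leaves 3G less of the reservation unconsumed, so no extra space is
 * needed and the ENOSPC check below is skipped.
 */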
3045 if (csa->unused_refres_delta > 0 &&
3046 csa->unused_refres_delta >
3047 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3050 if (csa->ohds->ds_quota != 0 &&
3051 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3059 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3061 struct cloneswaparg *csa = arg1;
3062 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3064 ASSERT(csa->cds->ds_reserved == 0);
3065 ASSERT(csa->ohds->ds_quota == 0 ||
3066 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3068 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3069 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3071 if (csa->cds->ds_objset != NULL) {
3072 dmu_objset_evict(csa->cds->ds_objset);
3073 csa->cds->ds_objset = NULL;
3076 if (csa->ohds->ds_objset != NULL) {
3077 dmu_objset_evict(csa->ohds->ds_objset);
3078 csa->ohds->ds_objset = NULL;
3082 * Reset origin's unique bytes, if it exists.
3084 if (csa->cds->ds_prev) {
3085 dsl_dataset_t *origin = csa->cds->ds_prev;
3086 uint64_t comp, uncomp;
3088 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3089 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3090 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3091 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3097 tmp = csa->ohds->ds_phys->ds_bp;
3098 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3099 csa->cds->ds_phys->ds_bp = tmp;
3102 /* set dd_*_bytes */
3104 int64_t dused, dcomp, duncomp;
3105 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3106 uint64_t odl_used, odl_comp, odl_uncomp;
3108 ASSERT3U(csa->cds->ds_dir->dd_phys->
3109 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3111 dsl_deadlist_space(&csa->cds->ds_deadlist,
3112 &cdl_used, &cdl_comp, &cdl_uncomp);
3113 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3114 &odl_used, &odl_comp, &odl_uncomp);
3116 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3117 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3118 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3119 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3120 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3122 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3124 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3125 dused, dcomp, duncomp, tx);
3126 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3127 -dused, -dcomp, -duncomp, tx);
3130 * The difference in the space used by snapshots is the
3131 * difference in snapshot space due to the head's
3132 * deadlist (since that's the only thing that's
3133 * changing that affects the snapused).
3135 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3136 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3137 &cdl_used, &cdl_comp, &cdl_uncomp);
3138 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3139 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3140 &odl_used, &odl_comp, &odl_uncomp);
3141 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3142 DD_USED_HEAD, DD_USED_SNAP, tx);
3145 /* swap ds_*_bytes */
3146 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3147 csa->cds->ds_phys->ds_used_bytes);
3148 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3149 csa->cds->ds_phys->ds_compressed_bytes);
3150 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3151 csa->cds->ds_phys->ds_uncompressed_bytes);
3152 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3153 csa->cds->ds_phys->ds_unique_bytes);
3155 /* apply any parent delta for change in unconsumed refreservation */
3156 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3157 csa->unused_refres_delta, 0, 0, tx);
3162 dsl_deadlist_close(&csa->cds->ds_deadlist);
3163 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3164 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3165 csa->cds->ds_phys->ds_deadlist_obj);
3166 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3167 csa->cds->ds_phys->ds_deadlist_obj);
3168 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3169 csa->ohds->ds_phys->ds_deadlist_obj);
3171 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3175 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3176 * recv" into an existing fs to swizzle the file system to the new
3177 * version, and by "zfs rollback". Can also be used to swap two
3178 * independent head datasets if neither has any snapshots.
3181 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3184 struct cloneswaparg csa;
3187 ASSERT(clone->ds_owner);
3188 ASSERT(origin_head->ds_owner);
3191 * Need exclusive access for the swap. If we're swapping these
3192 * datasets back after an error, we already hold the locks.
3194 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3195 rw_enter(&clone->ds_rwlock, RW_WRITER);
3196 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3197 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3198 rw_exit(&clone->ds_rwlock);
3199 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3200 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3201 rw_exit(&origin_head->ds_rwlock);
3206 csa.ohds = origin_head;
3208 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3209 dsl_dataset_clone_swap_check,
3210 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3215 * Given a pool name and a dataset object number in that pool,
3216 * return the name of that dataset.
3219 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3226 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3228 dp = spa_get_dsl(spa);
3229 rw_enter(&dp->dp_config_rwlock, RW_READER);
3230 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3231 dsl_dataset_name(ds, buf);
3232 dsl_dataset_rele(ds, FTAG);
3234 rw_exit(&dp->dp_config_rwlock);
3235 spa_close(spa, FTAG);
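/*
 * Usage sketch (pool name and object number hypothetical):
 *
 *	char buf[MAXNAMELEN];
 *	if (dsl_dsobj_to_dsname("tank", 1234, buf) == 0)
 *		buf now holds a name such as "tank/fs@snap"
 */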
3241 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3242 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3246 ASSERT3S(asize, >, 0);
3249 * *ref_rsrv is the portion of asize that will come from any
3250 * unconsumed refreservation space.
3254 mutex_enter(&ds->ds_lock);
3256 * Make a space adjustment for reserved bytes.
3258 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3259 ASSERT3U(*used, >=,
3260 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3261 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3262 *ref_rsrv =
3263 asize - MIN(asize, parent_delta(ds, asize + inflight));
3266 if (!check_quota || ds->ds_quota == 0) {
3267 mutex_exit(&ds->ds_lock);
3271 * If they are requesting more space, and our current estimate
3272 * is over quota, they get to try again unless the actual
3273 * on-disk is over quota and there are no pending changes (which
3274 * may free up space for us).
3276 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3277 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3282 mutex_exit(&ds->ds_lock);
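/*
 * Worked example (illustrative numbers only): with a 10G refquota,
 * 9.5G on disk, and a 1G write in flight, the estimate 9.5G + 1G
 * exceeds the quota, but since data may still be freed this txg the
 * caller gets ERESTART rather than EDQUOT; only when on-disk usage
 * alone is at or over the quota with nothing in flight is EDQUOT
 * returned.
 */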
3289 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3291 dsl_dataset_t *ds = arg1;
3292 dsl_prop_setarg_t *psa = arg2;
3295 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3298 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3301 if (psa->psa_effective_value == 0)
3304 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3305 psa->psa_effective_value < ds->ds_reserved)
3311 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3314 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3316 dsl_dataset_t *ds = arg1;
3317 dsl_prop_setarg_t *psa = arg2;
3318 uint64_t effective_value = psa->psa_effective_value;
3320 dsl_prop_set_sync(ds, psa, tx);
3321 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3323 if (ds->ds_quota != effective_value) {
3324 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3325 ds->ds_quota = effective_value;
3327 spa_history_log_internal(LOG_DS_REFQUOTA,
3328 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3329 (longlong_t)ds->ds_quota, ds->ds_object);
3334 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3337 dsl_prop_setarg_t psa;
3340 dsl_prop_setarg_init_uint64(&psa, "refquota", source, "a);
3342 err = dsl_dataset_hold(dsname, FTAG, &ds);
3347 * If someone removes a file, then tries to set the quota, we
3348 * want to make sure the file freeing takes effect.
3350 txg_wait_open(ds->ds_dir->dd_pool, 0);
3352 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3353 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3356 dsl_dataset_rele(ds, FTAG);
3361 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3363 dsl_dataset_t *ds = arg1;
3364 dsl_prop_setarg_t *psa = arg2;
3365 uint64_t effective_value;
3369 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3370 SPA_VERSION_REFRESERVATION)
3373 if (dsl_dataset_is_snapshot(ds))
3376 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3379 effective_value = psa->psa_effective_value;
3382 * If we are doing the preliminary check in open context, the
3383 * space estimates may be inaccurate.
3385 if (!dmu_tx_is_syncing(tx))
3388 mutex_enter(&ds->ds_lock);
3389 if (!DS_UNIQUE_IS_ACCURATE(ds))
3390 dsl_dataset_recalc_head_uniq(ds);
3391 unique = ds->ds_phys->ds_unique_bytes;
3392 mutex_exit(&ds->ds_lock);
3394 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3395 uint64_t delta = MAX(unique, effective_value) -
3396 MAX(unique, ds->ds_reserved);
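/*
 * Illustrative numbers: raising refreservation from 2G to 6G on a
 * dataset with 3G unique requires MAX(3G, 6G) - MAX(3G, 2G) = 3G of
 * new space from the parent, which the check below verifies is
 * available.
 */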
3398 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3400 if (ds->ds_quota > 0 &&
3401 effective_value > ds->ds_quota)
3409 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3411 dsl_dataset_t *ds = arg1;
3412 dsl_prop_setarg_t *psa = arg2;
3413 uint64_t effective_value = psa->psa_effective_value;
3417 dsl_prop_set_sync(ds, psa, tx);
3418 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3420 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3422 mutex_enter(&ds->ds_dir->dd_lock);
3423 mutex_enter(&ds->ds_lock);
3424 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3425 unique = ds->ds_phys->ds_unique_bytes;
3426 delta = MAX(0, (int64_t)(effective_value - unique)) -
3427 MAX(0, (int64_t)(ds->ds_reserved - unique));
3428 ds->ds_reserved = effective_value;
3429 mutex_exit(&ds->ds_lock);
3431 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3432 mutex_exit(&ds->ds_dir->dd_lock);
3434 spa_history_log_internal(LOG_DS_REFRESERV,
3435 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3436 (longlong_t)effective_value, ds->ds_object);
3440 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3441 uint64_t reservation)
3444 dsl_prop_setarg_t psa;
3447 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3450 err = dsl_dataset_hold(dsname, FTAG, &ds);
3454 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3455 dsl_dataset_set_reservation_check,
3456 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3458 dsl_dataset_rele(ds, FTAG);
3462 typedef struct zfs_hold_cleanup_arg {
3465 char htag[MAXNAMELEN];
3466 } zfs_hold_cleanup_arg_t;
3469 dsl_dataset_user_release_onexit(void *arg)
3471 zfs_hold_cleanup_arg_t *ca = arg;
3473 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3475 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3479 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3482 zfs_hold_cleanup_arg_t *ca;
3484 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3485 ca->dp = ds->ds_dir->dd_pool;
3486 ca->dsobj = ds->ds_object;
3487 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3488 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3489 dsl_dataset_user_release_onexit, ca, NULL));
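/*
 * A sketch of the cleanup flow (fd and tag hypothetical): a caller
 * creating a temporary hold with cleanup_fd != -1 has already done
 * zfs_onexit_fd_hold(fd, &minor); registering here means that when
 * that fd is closed, dsl_dataset_user_release_onexit() fires and
 * drops the "htag" hold with no further action from the caller.
 */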
3493 * If you add new checks here, you may need to add
3494 * additional checks to the "temporary" case in
3495 * snapshot_check() in dmu_objset.c.
3498 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3500 dsl_dataset_t *ds = arg1;
3501 struct dsl_ds_holdarg *ha = arg2;
3502 char *htag = ha->htag;
3503 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3506 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3509 if (!dsl_dataset_is_snapshot(ds))
3512 /* tags must be unique */
3513 mutex_enter(&ds->ds_lock);
3514 if (ds->ds_phys->ds_userrefs_obj) {
3515 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3519 else if (error == ENOENT)
3522 mutex_exit(&ds->ds_lock);
3524 if (error == 0 && ha->temphold &&
3525 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3532 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3534 dsl_dataset_t *ds = arg1;
3535 struct dsl_ds_holdarg *ha = arg2;
3536 char *htag = ha->htag;
3537 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3538 objset_t *mos = dp->dp_meta_objset;
3539 uint64_t now = gethrestime_sec();
3542 mutex_enter(&ds->ds_lock);
3543 if (ds->ds_phys->ds_userrefs_obj == 0) {
3545 * This is the first user hold for this dataset. Create
3546 * the userrefs zap object.
3548 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3549 zapobj = ds->ds_phys->ds_userrefs_obj =
3550 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3552 zapobj = ds->ds_phys->ds_userrefs_obj;
3555 mutex_exit(&ds->ds_lock);
3557 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3560 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3564 spa_history_log_internal(LOG_DS_USER_HOLD,
3565 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3566 (int)ha->temphold, ds->ds_object);
3570 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3572 struct dsl_ds_holdarg *ha = arg;
3577 /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3578 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3579 error = dsl_dataset_hold(name, ha->dstg, &ds);
3582 ha->gotone = B_TRUE;
3583 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3584 dsl_dataset_user_hold_sync, ds, ha, 0);
3585 } else if (error == ENOENT && ha->recursive) {
3588 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3594 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3597 struct dsl_ds_holdarg *ha;
3600 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3602 ha->temphold = temphold;
3603 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3604 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3606 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3612 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3613 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3615 struct dsl_ds_holdarg *ha;
3616 dsl_sync_task_t *dst;
3621 if (cleanup_fd != -1) {
3622 /* Currently we only support cleanup-on-exit of tempholds. */
3625 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3630 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3632 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3634 error = spa_open(dsname, &spa, FTAG);
3636 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3637 if (cleanup_fd != -1)
3638 zfs_onexit_fd_rele(cleanup_fd);
3642 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3644 ha->snapname = snapname;
3645 ha->recursive = recursive;
3646 ha->temphold = temphold;
3649 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3650 ha, DS_FIND_CHILDREN);
3652 error = dsl_dataset_user_hold_one(dsname, ha);
3655 error = dsl_sync_task_group_wait(ha->dstg);
3657 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3658 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3659 dsl_dataset_t *ds = dst->dst_arg1;
3662 dsl_dataset_name(ds, ha->failed);
3663 *strchr(ha->failed, '@') = '\0';
3664 } else if (error == 0 && minor != 0 && temphold) {
3666 * If this hold is to be released upon process exit,
3667 * register that action now.
3669 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3671 dsl_dataset_rele(ds, ha->dstg);
3674 if (error == 0 && recursive && !ha->gotone)
3678 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3680 dsl_sync_task_group_destroy(ha->dstg);
3682 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3683 spa_close(spa, FTAG);
3684 if (cleanup_fd != -1)
3685 zfs_onexit_fd_rele(cleanup_fd);
3689 struct dsl_ds_releasearg {
3692 boolean_t own; /* do we own or just hold ds? */
3696 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3697 boolean_t *might_destroy)
3699 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3704 *might_destroy = B_FALSE;
3706 mutex_enter(&ds->ds_lock);
3707 zapobj = ds->ds_phys->ds_userrefs_obj;
3709 /* The tag can't possibly exist */
3710 mutex_exit(&ds->ds_lock);
3714 /* Make sure the tag exists */
3715 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3717 mutex_exit(&ds->ds_lock);
3718 if (error == ENOENT)
3723 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3724 DS_IS_DEFER_DESTROY(ds))
3725 *might_destroy = B_TRUE;
3727 mutex_exit(&ds->ds_lock);
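/*
 * Example (illustrative): a snapshot destroyed with "zfs destroy -d"
 * (DS_IS_DEFER_DESTROY), with no clones (ds_num_children == 1) and
 * exactly one remaining user hold, will be destroyed for real once
 * that hold is released.
 */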
3732 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3734 struct dsl_ds_releasearg *ra = arg1;
3735 dsl_dataset_t *ds = ra->ds;
3736 boolean_t might_destroy;
3739 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3742 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3746 if (might_destroy) {
3747 struct dsl_ds_destroyarg dsda = {0};
3749 if (dmu_tx_is_syncing(tx)) {
3751 * If we're not prepared to remove the snapshot,
3752 * we can't allow the release to happen right now.
3758 dsda.releasing = B_TRUE;
3759 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3766 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3768 struct dsl_ds_releasearg *ra = arg1;
3769 dsl_dataset_t *ds = ra->ds;
3770 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3771 objset_t *mos = dp->dp_meta_objset;
3773 uint64_t dsobj = ds->ds_object;
3777 mutex_enter(&ds->ds_lock);
3779 refs = ds->ds_userrefs;
3780 mutex_exit(&ds->ds_lock);
3781 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3782 VERIFY(error == 0 || error == ENOENT);
3783 zapobj = ds->ds_phys->ds_userrefs_obj;
3784 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3785 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3786 DS_IS_DEFER_DESTROY(ds)) {
3787 struct dsl_ds_destroyarg dsda = {0};
3791 dsda.releasing = B_TRUE;
3792 /* We already did the destroy_check */
3793 dsl_dataset_destroy_sync(&dsda, tag, tx);
3796 spa_history_log_internal(LOG_DS_USER_RELEASE,
3797 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3798 ra->htag, (longlong_t)refs, dsobj);
3802 dsl_dataset_user_release_one(const char *dsname, void *arg)
3804 struct dsl_ds_holdarg *ha = arg;
3805 struct dsl_ds_releasearg *ra;
3808 void *dtag = ha->dstg;
3810 boolean_t own = B_FALSE;
3811 boolean_t might_destroy;
3813 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3814 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3815 error = dsl_dataset_hold(name, dtag, &ds);
3817 if (error == ENOENT && ha->recursive)
3819 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3823 ha->gotone = B_TRUE;
3825 ASSERT(dsl_dataset_is_snapshot(ds));
3827 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3829 dsl_dataset_rele(ds, dtag);
3833 if (might_destroy) {
3835 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3836 error = zfs_unmount_snap(name, NULL);
3839 dsl_dataset_rele(ds, dtag);
3843 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3844 dsl_dataset_rele(ds, dtag);
3848 dsl_dataset_make_exclusive(ds, dtag);
3852 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3854 ra->htag = ha->htag;
3856 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3857 dsl_dataset_user_release_sync, ra, dtag, 0);
3863 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3864 boolean_t recursive)
3866 struct dsl_ds_holdarg *ha;
3867 dsl_sync_task_t *dst;
3872 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3874 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3876 error = spa_open(dsname, &spa, FTAG);
3878 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3882 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3884 ha->snapname = snapname;
3885 ha->recursive = recursive;
3887 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3888 ha, DS_FIND_CHILDREN);
3890 error = dsl_dataset_user_release_one(dsname, ha);
3893 error = dsl_sync_task_group_wait(ha->dstg);
3895 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3896 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3897 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3898 dsl_dataset_t *ds = ra->ds;
3901 dsl_dataset_name(ds, ha->failed);
3904 dsl_dataset_disown(ds, ha->dstg);
3906 dsl_dataset_rele(ds, ha->dstg);
3908 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3911 if (error == 0 && recursive && !ha->gotone)
3914 if (error && error != EBUSY)
3915 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3917 dsl_sync_task_group_destroy(ha->dstg);
3918 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3919 spa_close(spa, FTAG);
3922 * We can get EBUSY if we were racing with deferred destroy and
3923 * dsl_dataset_user_release_check() hadn't done the necessary
3924 * open context setup. We can also get EBUSY if we're racing
3925 * with destroy and that thread is the ds_owner. Either way
3926 * the busy condition should be transient, and we should retry
3927 * the release operation.
3936 * Called at spa_load time (with retry == B_FALSE) to release a stale
3937 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
3940 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
3950 rw_enter(&dp->dp_config_rwlock, RW_READER);
3951 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3952 rw_exit(&dp->dp_config_rwlock);
3955 namelen = dsl_dataset_namelen(ds)+1;
3956 name = kmem_alloc(namelen, KM_SLEEP);
3957 dsl_dataset_name(ds, name);
3958 dsl_dataset_rele(ds, FTAG);
3960 snap = strchr(name, '@');
3963 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
3964 kmem_free(name, namelen);
3967 * The object can't have been destroyed because we have a hold,
3968 * but it might have been renamed, resulting in ENOENT. Retry
3969 * if we've been requested to do so.
3971 * It would be nice if we could use the dsobj all the way
3972 * through and avoid ENOENT entirely. But we might need to
3973 * unmount the snapshot, and there's currently no way to lookup
3974 * a vfsp using a ZFS object id.
3976 } while ((error == ENOENT) && retry);
3982 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3987 err = dsl_dataset_hold(dsname, FTAG, &ds);
3991 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
3992 if (ds->ds_phys->ds_userrefs_obj != 0) {
3993 zap_attribute_t *za;
3996 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
3997 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
3998 ds->ds_phys->ds_userrefs_obj);
3999 zap_cursor_retrieve(&zc, za) == 0;
4000 zap_cursor_advance(&zc)) {
4001 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4002 za->za_first_integer));
4004 zap_cursor_fini(&zc);
4005 kmem_free(za, sizeof (zap_attribute_t));
4007 dsl_dataset_rele(ds, FTAG);
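/*
 * Usage sketch (names hypothetical): each pair in *nvp maps a hold
 * tag to the timestamp recorded by dsl_dataset_user_hold_sync(), so
 * a caller can walk the result:
 *
 *	nvpair_t *pair = NULL;
 *	while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL)
 *		nvpair_name(pair) is the tag, its uint64 value the time
 */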
4012 * Note, this function is used as the callback for dmu_objset_find(). We
4013 * always return 0 so that we will continue to find and process
4014 * inconsistent datasets, even if we encounter an error trying to
4015 * process one of them.
4019 dsl_destroy_inconsistent(const char *dsname, void *arg)
4023 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4024 if (DS_IS_INCONSISTENT(ds))
4025 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4027 dsl_dataset_disown(ds, FTAG);