/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx);

/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
        dsl_dir_t *dd = arg;
        dsl_pool_t *dp = dd->dd_pool;
        int t;

        for (t = 0; t < TXG_SIZE; t++) {
                ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
                ASSERT(dd->dd_tempreserved[t] == 0);
                ASSERT(dd->dd_space_towrite[t] == 0);
        }

        if (dd->dd_parent)
                dsl_dir_close(dd->dd_parent, dd);

        spa_close(dd->dd_pool->dp_spa, dd);

        /*
         * The props callback list should be empty since they hold the
         * dir open.
         */
        list_destroy(&dd->dd_prop_cbs);
        mutex_destroy(&dd->dd_lock);
        kmem_free(dd, sizeof (dsl_dir_t));
}

int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
        dmu_buf_t *dbuf;
        dsl_dir_t *dd;
        int err;

        ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
            dsl_pool_sync_context(dp));

        err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
        if (err)
                return (err);
        dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
        {
                dmu_object_info_t doi;
                dmu_object_info_from_db(dbuf, &doi);
                ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
                ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
        }
#endif
        if (dd == NULL) {
                dsl_dir_t *winner;

                dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
                dd->dd_object = ddobj;
                dd->dd_dbuf = dbuf;
                dd->dd_pool = dp;
                dd->dd_phys = dbuf->db_data;
                mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

                list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
                    offsetof(dsl_prop_cb_record_t, cbr_node));

                if (dd->dd_phys->dd_parent_obj) {
                        err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
                            NULL, dd, &dd->dd_parent);
                        if (err)
                                goto errout;
                        if (tail) {
#ifdef ZFS_DEBUG
                                uint64_t foundobj;

                                err = zap_lookup(dp->dp_meta_objset,
                                    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
                                    tail, sizeof (foundobj), 1, &foundobj);
                                ASSERT(err || foundobj == ddobj);
#endif
                                (void) strcpy(dd->dd_myname, tail);
                        } else {
                                err = zap_value_search(dp->dp_meta_objset,
                                    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
                                    ddobj, 0, dd->dd_myname);
                        }
                        if (err)
                                goto errout;
                } else {
                        (void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
                }

                winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
                    dsl_dir_evict);
                if (winner) {
                        if (dd->dd_parent)
                                dsl_dir_close(dd->dd_parent, dd);
                        mutex_destroy(&dd->dd_lock);
                        kmem_free(dd, sizeof (dsl_dir_t));
                        dd = winner;
                } else {
                        spa_open_ref(dp->dp_spa, dd);
                }
        }

        /*
         * The dsl_dir_t has both open-to-close and instantiate-to-evict
         * holds on the spa.  We need the open-to-close holds because
         * otherwise the spa_refcnt wouldn't change when we open a
         * dir which the spa also has open, so we could incorrectly
         * think it was OK to unload/export/destroy the pool.  We need
         * the instantiate-to-evict hold because the dsl_dir_t has a
         * pointer to the dd_pool, which has a pointer to the spa_t.
         */
        spa_open_ref(dp->dp_spa, tag);
        ASSERT3P(dd->dd_pool, ==, dp);
        ASSERT3U(dd->dd_object, ==, ddobj);
        ASSERT3P(dd->dd_dbuf, ==, dbuf);
        *ddp = dd;
        return (0);

errout:
        if (dd->dd_parent)
                dsl_dir_close(dd->dd_parent, dd);
        mutex_destroy(&dd->dd_lock);
        kmem_free(dd, sizeof (dsl_dir_t));
        dmu_buf_rele(dbuf, tag);
        return (err);
}

void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
        dprintf_dd(dd, "%s\n", "");
        spa_close(dd->dd_pool->dp_spa, tag);
        dmu_buf_rele(dd->dd_dbuf, tag);
}
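
/*
 * Illustrative sketch (not part of the original file): callers pair
 * dsl_dir_open_obj() with dsl_dir_close() under the pool config
 * rwlock, passing the same tag to both. The function name below is
 * hypothetical.
 */
#if 0
static int
example_hold_root_dir(dsl_pool_t *dp)
{
        dsl_dir_t *dd;
        int err;

        rw_enter(&dp->dp_config_rwlock, RW_READER);
        err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, FTAG, &dd);
        rw_exit(&dp->dp_config_rwlock);
        if (err == 0) {
                /* ... use dd ... */
                dsl_dir_close(dd, FTAG);
        }
        return (err);
}
#endif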

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
        if (dd->dd_parent) {
                dsl_dir_name(dd->dd_parent, buf);
                (void) strcat(buf, "/");
        } else {
                buf[0] = '\0';
        }
        if (!MUTEX_HELD(&dd->dd_lock)) {
                /*
                 * recursive mutex so that we can use
                 * dprintf_dd() with dd_lock held
                 */
                mutex_enter(&dd->dd_lock);
                (void) strcat(buf, dd->dd_myname);
                mutex_exit(&dd->dd_lock);
        } else {
                (void) strcat(buf, dd->dd_myname);
        }
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
        int result = 0;

        if (dd->dd_parent) {
                /* parent's name + 1 for the "/" */
                result = dsl_dir_namelen(dd->dd_parent) + 1;
        }

        if (!MUTEX_HELD(&dd->dd_lock)) {
                /* see dsl_dir_name */
                mutex_enter(&dd->dd_lock);
                result += strlen(dd->dd_myname);
                mutex_exit(&dd->dd_lock);
        } else {
                result += strlen(dd->dd_myname);
        }

        return (result);
}
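
/*
 * Illustrative sketch (not in the original): dsl_dir_namelen() lets a
 * caller size an exact buffer for dsl_dir_name() instead of the
 * worst-case MAXNAMELEN + strlen(MOS_DIR_NAME) + 1. The helper below
 * is hypothetical.
 */
#if 0
static char *
example_alloc_dir_name(dsl_dir_t *dd, int *lenp)
{
        int len = dsl_dir_namelen(dd) + 1;      /* +1 for the NUL */
        char *buf = kmem_alloc(len, KM_SLEEP);

        dsl_dir_name(dd, buf);
        *lenp = len;            /* caller does kmem_free(buf, len) */
        return (buf);
}
#endif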

static int
getcomponent(const char *path, char *component, const char **nextp)
{
        char *p;

        if ((path == NULL) || (path[0] == '\0'))
                return (ENOENT);
        /* This would be a good place to reserve some namespace... */
        p = strpbrk(path, "/@");
        if (p && (p[1] == '/' || p[1] == '@')) {
                /* two separators in a row */
                return (EINVAL);
        }
        if (p == NULL || p == path) {
                /*
                 * if the first thing is an @ or /, it had better be an
                 * @ and it had better not have any more ats or slashes,
                 * and it had better have something after the @.
                 */
                if (p != NULL &&
                    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
                        return (EINVAL);
                if (strlen(path) >= MAXNAMELEN)
                        return (ENAMETOOLONG);
                (void) strcpy(component, path);
                p = NULL;
        } else if (p[0] == '/') {
                if (p-path >= MAXNAMELEN)
                        return (ENAMETOOLONG);
                (void) strncpy(component, path, p - path);
                component[p-path] = '\0';
                p++;
        } else if (p[0] == '@') {
                /*
                 * if the next separator is an @, there better not be
                 * any more slashes.
                 */
                if (strchr(path, '/'))
                        return (EINVAL);
                if (p-path >= MAXNAMELEN)
                        return (ENAMETOOLONG);
                (void) strncpy(component, path, p - path);
                component[p-path] = '\0';
        } else {
                ASSERT(!"invalid p");
        }
        *nextp = p;
        return (0);
}
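
/*
 * Worked example (not in the original): given "a/b@snap", successive
 * getcomponent() calls return component "a" with *nextp pointing at
 * "b@snap", then component "b" with *nextp pointing at "@snap"; a
 * path whose next separator is '@' may contain no further '/'
 * characters. Inputs such as "a//b" or "a@x@y" fail with EINVAL.
 */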

/*
 * same as dsl_open_dir, ignore the first component of name and use the
 * spa instead
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
        char buf[MAXNAMELEN];
        const char *next, *nextnext = NULL;
        int err;
        dsl_dir_t *dd;
        dsl_pool_t *dp;
        uint64_t ddobj;
        int openedspa = FALSE;

        dprintf("%s\n", name);

        err = getcomponent(name, buf, &next);
        if (err)
                return (err);
        if (spa == NULL) {
                err = spa_open(buf, &spa, FTAG);
                if (err) {
                        dprintf("spa_open(%s) failed\n", buf);
                        return (err);
                }
                openedspa = TRUE;

                /* XXX this assertion belongs in spa_open */
                ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
        }

        dp = spa_get_dsl(spa);

        rw_enter(&dp->dp_config_rwlock, RW_READER);
        err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
        if (err) {
                rw_exit(&dp->dp_config_rwlock);
                if (openedspa)
                        spa_close(spa, FTAG);
                return (err);
        }

        while (next != NULL) {
                dsl_dir_t *child_ds;

                err = getcomponent(next, buf, &nextnext);
                if (err)
                        break;
                ASSERT(next[0] != '\0');
                if (next[0] == '@')
                        break;
                dprintf("looking up %s in obj%lld\n",
                    buf, dd->dd_phys->dd_child_dir_zapobj);

                err = zap_lookup(dp->dp_meta_objset,
                    dd->dd_phys->dd_child_dir_zapobj,
                    buf, sizeof (ddobj), 1, &ddobj);
                if (err) {
                        if (err == ENOENT)
                                err = 0;
                        break;
                }

                err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
                if (err)
                        break;
                dsl_dir_close(dd, tag);
                dd = child_ds;
                next = nextnext;
        }
        rw_exit(&dp->dp_config_rwlock);

        if (err) {
                dsl_dir_close(dd, tag);
                if (openedspa)
                        spa_close(spa, FTAG);
                return (err);
        }

        /*
         * It's an error if there's more than one component left, or
         * tailp==NULL and there's any component left.
         */
        if (next != NULL &&
            (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
                /* bad path name */
                dsl_dir_close(dd, tag);
                dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
                err = ENOENT;
        }
        if (tailp)
                *tailp = next;
        if (openedspa)
                spa_close(spa, FTAG);
        *ddp = dd;
        return (err);
}

/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  Return an error if the path is bogus, or if
 * tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
        return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}
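
/*
 * Illustrative sketch (not in the original): opening "pool/fs/newchild"
 * where only the final component is missing succeeds and leaves *tail
 * pointing at "newchild"; a tail beginning with '@' names a snapshot.
 * The function name below is hypothetical.
 */
#if 0
static int
example_open_parent(const char *name)
{
        dsl_dir_t *pdd;
        const char *tail;
        int err;

        err = dsl_dir_open(name, FTAG, &pdd, &tail);
        if (err)
                return (err);
        if (tail != NULL && tail[0] == '@') {
                /* the last component is a snapshot name */
        }
        dsl_dir_close(pdd, FTAG);
        return (0);
}
#endif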

uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
        objset_t *mos = dp->dp_meta_objset;
        uint64_t ddobj;
        dsl_dir_phys_t *dsphys;
        dmu_buf_t *dbuf;

        ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
            DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
        if (pds) {
                VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
                    name, sizeof (uint64_t), 1, &ddobj, tx));
        } else {
                /* it's the root dir */
                VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
                    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
        }
        VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
        dmu_buf_will_dirty(dbuf, tx);
        dsphys = dbuf->db_data;

        dsphys->dd_creation_time = gethrestime_sec();
        if (pds)
                dsphys->dd_parent_obj = pds->dd_object;
        dsphys->dd_props_zapobj = zap_create(mos,
            DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
        dsphys->dd_child_dir_zapobj = zap_create(mos,
            DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
        if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
                dsphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
        dmu_buf_rele(dbuf, FTAG);

        return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        dsl_pool_t *dp = dd->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        int err;
        uint64_t count;

        /*
         * There should be exactly two holds, both from
         * dsl_dataset_destroy: one on the dd directory, and one on its
         * head ds.  Otherwise, someone is trying to lookup something
         * inside this dir while we want to destroy it.  The
         * config_rwlock ensures that nobody else opens it after we
         * check.
         */
        if (dmu_buf_refcount(dd->dd_dbuf) > 2)
                return (EBUSY);

        err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
        if (err)
                return (err);
        if (count != 0)
                return (EEXIST);

        return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        uint64_t val, obj;
        dd_used_t t;

        ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
        ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

        /* Remove our reservation. */
        val = 0;
        dsl_dir_set_reservation_sync(dd, &val, cr, tx);
        ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0);
        ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
        for (t = 0; t < DD_USED_NUM; t++)
                ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0);

        VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
        VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
        VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
        VERIFY(0 == zap_remove(mos,
            dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

        obj = dd->dd_object;
        dsl_dir_close(dd, tag);
        VERIFY(0 == dmu_object_free(mos, obj, tx));
}

boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
        return (dd->dd_phys->dd_origin_obj &&
            (dd->dd_pool->dp_origin_snap == NULL ||
            dd->dd_phys->dd_origin_obj !=
            dd->dd_pool->dp_origin_snap->ds_object));
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
        mutex_enter(&dd->dd_lock);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
            dd->dd_phys->dd_used_bytes);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
            dd->dd_phys->dd_reserved);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
            dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
            (dd->dd_phys->dd_uncompressed_bytes * 100 /
            dd->dd_phys->dd_compressed_bytes));
        if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
                    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
                    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
                    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
                    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
                    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
        }
        mutex_exit(&dd->dd_lock);

        rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
        if (dsl_dir_is_clone(dd)) {
                dsl_dataset_t *ds;
                char buf[MAXNAMELEN];

                VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
                    dd->dd_phys->dd_origin_obj, FTAG, &ds));
                dsl_dataset_name(ds, buf);
                dsl_dataset_rele(ds, FTAG);
                dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
        }
        rw_exit(&dd->dd_pool->dp_config_rwlock);
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
        dsl_pool_t *dp = dd->dd_pool;

        ASSERT(dd->dd_phys);

        if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
                /* up the hold count until we can be written out */
                dmu_buf_add_ref(dd->dd_dbuf, dd);
        }
}

static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
        uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
        uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
        return (new_accounted - old_accounted);
}
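
/*
 * Worked example (not in the original): with dd_reserved = 100, going
 * from used = 80 to used = 90 yields MAX(90, 100) - MAX(80, 100) = 0,
 * entirely absorbed by the reservation; going from used = 80 to
 * used = 120 charges the parent only MAX(120, 100) - MAX(80, 100) = 20,
 * the portion that exceeds the reservation.
 */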

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
        ASSERT(dmu_tx_is_syncing(tx));

        dmu_buf_will_dirty(dd->dd_dbuf, tx);

        mutex_enter(&dd->dd_lock);
        ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
        dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
            dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
        dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
        mutex_exit(&dd->dd_lock);

        /* release the hold from dsl_dir_dirty */
        dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
        uint64_t space = 0;
        int i;

        ASSERT(MUTEX_HELD(&dd->dd_lock));

        for (i = 0; i < TXG_SIZE; i++) {
                space += dd->dd_space_towrite[i&TXG_MASK];
                ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
        }
        return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
        uint64_t parentspace, myspace, quota, used;

        /*
         * If there are no restrictions otherwise, assume we have
         * unlimited space available.
         */
        quota = UINT64_MAX;
        parentspace = UINT64_MAX;

        if (dd->dd_parent != NULL) {
                parentspace = dsl_dir_space_available(dd->dd_parent,
                    ancestor, delta, ondiskonly);
        }

        mutex_enter(&dd->dd_lock);
        if (dd->dd_phys->dd_quota != 0)
                quota = dd->dd_phys->dd_quota;
        used = dd->dd_phys->dd_used_bytes;
        if (!ondiskonly)
                used += dsl_dir_space_towrite(dd);

        if (dd->dd_parent == NULL) {
                uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
                quota = MIN(quota, poolsize);
        }

        if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
                /*
                 * We have some space reserved, in addition to what our
                 * parent gave us.
                 */
                parentspace += dd->dd_phys->dd_reserved - used;
        }

        if (dd == ancestor) {
                ASSERT(delta <= 0);
                ASSERT(used >= -delta);
                used += delta;
                if (parentspace != UINT64_MAX)
                        parentspace -= delta;
        }

        if (used > quota) {
                /* over quota */
                myspace = 0;

                /*
                 * While it's OK to be a little over quota, if
                 * we think we are using more space than there
                 * is in the pool (which is already 1.6% more than
                 * dsl_pool_adjustedsize()), something is very
                 * wrong.
                 */
                ASSERT3U(used, <=, spa_get_space(dd->dd_pool->dp_spa));
        } else {
                /*
                 * the lesser of the space provided by our parent and
                 * the space left in our quota
                 */
                myspace = MIN(parentspace, quota - used);
        }

        mutex_exit(&dd->dd_lock);

        return (myspace);
}
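
/*
 * Worked example (not in the original): a dir with a 10G quota, 7G
 * used on disk, 1G of pending writes, and a parent offering 5G gets
 * MIN(5G, 10G - 8G) = 2G; with ondiskonly set, the pending 1G is
 * ignored and MIN(5G, 10G - 7G) = 3G is reported.
 */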

struct tempreserve {
        list_node_t tr_node;
        dsl_pool_t *tr_dp;
        dsl_dir_t *tr_ds;
        uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
        uint64_t txg = tx->tx_txg;
        uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
        struct tempreserve *tr;
        int enospc = EDQUOT;
        int txgidx = txg & TXG_MASK;
        int i;
        uint64_t ref_rsrv = 0;

        ASSERT3U(txg, !=, 0);
        ASSERT3S(asize, >, 0);

        mutex_enter(&dd->dd_lock);

        /*
         * Check against the dsl_dir's quota.  We don't add in the delta
         * when checking for over-quota because they get one free hit.
         */
        est_inflight = dsl_dir_space_towrite(dd);
        for (i = 0; i < TXG_SIZE; i++)
                est_inflight += dd->dd_tempreserved[i];
        used_on_disk = dd->dd_phys->dd_used_bytes;

        /*
         * On the first iteration, fetch the dataset's used-on-disk and
         * refreservation values. Also, if checkrefquota is set, test if
         * allocating this space would exceed the dataset's refquota.
         */
        if (first && tx->tx_objset) {
                int error;
                dsl_dataset_t *ds = tx->tx_objset->os->os_dsl_dataset;

                error = dsl_dataset_check_quota(ds, checkrefquota,
                    asize, est_inflight, &used_on_disk, &ref_rsrv);
                if (error) {
                        mutex_exit(&dd->dd_lock);
                        return (error);
                }
        }

        /*
         * If this transaction will result in a net free of space,
         * we want to let it through.
         */
        if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
                quota = UINT64_MAX;
        else
                quota = dd->dd_phys->dd_quota;

        /*
         * Adjust the quota against the actual pool size at the root.
         * To ensure that it's possible to remove files from a full
         * pool without inducing transient overcommits, we throttle
         * netfree transactions against a quota that is slightly larger,
         * but still within the pool's allocation slop.  In cases where
         * we're very close to full, this will allow a steady trickle of
         * removes to get through.
         */
        if (dd->dd_parent == NULL) {
                uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
                if (poolsize < quota) {
                        quota = poolsize;
                        enospc = ENOSPC;
                }
        }

        /*
         * If they are requesting more space, and our current estimate
         * is over quota, they get to try again unless the actual
         * on-disk is over quota and there are no pending changes (which
         * may free up space for us).
         */
        if (used_on_disk + est_inflight > quota) {
                if (est_inflight > 0 || used_on_disk < quota)
                        enospc = ERESTART;
                dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
                    "quota=%lluK tr=%lluK err=%d\n",
                    used_on_disk>>10, est_inflight>>10,
                    quota>>10, asize>>10, enospc);
                mutex_exit(&dd->dd_lock);
                return (enospc);
        }

        /* We need to up our estimated delta before dropping dd_lock */
        dd->dd_tempreserved[txgidx] += asize;

        parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
            asize - ref_rsrv);
        mutex_exit(&dd->dd_lock);

        tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
        tr->tr_ds = dd;
        tr->tr_size = asize;
        list_insert_tail(tr_list, tr);

        /* see if it's OK with our parent */
        if (dd->dd_parent && parent_rsrv) {
                boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

                return (dsl_dir_tempreserve_impl(dd->dd_parent,
                    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
        } else {
                return (0);
        }
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
        int err;
        list_t *tr_list;

        if (asize == 0) {
                *tr_cookiep = NULL;
                return (0);
        }

        tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
        list_create(tr_list, sizeof (struct tempreserve),
            offsetof(struct tempreserve, tr_node));
        ASSERT3S(asize, >, 0);
        ASSERT3S(fsize, >=, 0);

        err = arc_tempreserve_space(lsize, tx->tx_txg);
        if (err == 0) {
                struct tempreserve *tr;

                tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
                tr->tr_size = lsize;
                list_insert_tail(tr_list, tr);

                err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
        } else {
                if (err == EAGAIN) {
                        txg_delay(dd->dd_pool, tx->tx_txg, 1);
                        err = ERESTART;
                }
                dsl_pool_memory_pressure(dd->dd_pool);
        }

        if (err == 0) {
                struct tempreserve *tr;

                tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
                tr->tr_dp = dd->dd_pool;
                tr->tr_size = asize;
                list_insert_tail(tr_list, tr);

                err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
                    FALSE, asize > usize, tr_list, tx, TRUE);
        }

        if (err)
                dsl_dir_tempreserve_clear(tr_list, tx);
        else
                *tr_cookiep = tr_list;

        return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
        int txgidx = tx->tx_txg & TXG_MASK;
        list_t *tr_list = tr_cookie;
        struct tempreserve *tr;

        ASSERT3U(tx->tx_txg, !=, 0);

        if (tr_cookie == NULL)
                return;

        while (tr = list_head(tr_list)) {
                if (tr->tr_dp) {
                        dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
                } else if (tr->tr_ds) {
                        mutex_enter(&tr->tr_ds->dd_lock);
                        ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
                            tr->tr_size);
                        tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
                        mutex_exit(&tr->tr_ds->dd_lock);
                } else {
                        arc_tempreserve_clear(tr->tr_size);
                }
                list_remove(tr_list, tr);
                kmem_free(tr, sizeof (struct tempreserve));
        }

        kmem_free(tr_list, sizeof (list_t));
}
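
/*
 * Illustrative sketch (not in the original): a reservation taken with
 * dsl_dir_tempreserve_space() is held while the data is dirtied and
 * then released with dsl_dir_tempreserve_clear(), mirroring how the
 * DMU transaction code drives this interface. The function name and
 * the zero fsize/usize arguments below are hypothetical.
 */
#if 0
static int
example_reserve(dsl_dir_t *dd, uint64_t lsize, uint64_t asize, dmu_tx_t *tx)
{
        void *tr_cookie;
        int err;

        err = dsl_dir_tempreserve_space(dd, lsize, asize,
            0, 0, &tr_cookie, tx);
        if (err)
                return (err);   /* e.g. EDQUOT, ENOSPC, or ERESTART */

        /* ... dirty the data, call dsl_dir_willuse_space(), etc ... */

        dsl_dir_tempreserve_clear(tr_cookie, tx);
        return (0);
}
#endif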

static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
        int64_t parent_space;
        uint64_t est_used;

        mutex_enter(&dd->dd_lock);
        if (space > 0)
                dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

        est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
        parent_space = parent_delta(dd, est_used, space);
        mutex_exit(&dd->dd_lock);

        /* Make sure that we clean up dd_space_to* */
        dsl_dir_dirty(dd, tx);

        /* XXX this is potentially expensive and unnecessary... */
        if (parent_space && dd->dd_parent)
                dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}

/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
        dsl_pool_willuse_space(dd->dd_pool, space, tx);
        dsl_dir_willuse_space_impl(dd, space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
        int64_t accounted_delta;
        boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(type < DD_USED_NUM);

        dsl_dir_dirty(dd, tx);

        if (needlock)
                mutex_enter(&dd->dd_lock);
        accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
        ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
        ASSERT(compressed >= 0 ||
            dd->dd_phys->dd_compressed_bytes >= -compressed);
        ASSERT(uncompressed >= 0 ||
            dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
        dd->dd_phys->dd_used_bytes += used;
        dd->dd_phys->dd_uncompressed_bytes += uncompressed;
        dd->dd_phys->dd_compressed_bytes += compressed;

        if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
                ASSERT(used > 0 ||
                    dd->dd_phys->dd_used_breakdown[type] >= -used);
                dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
                dd_used_t t;
                uint64_t u = 0;
                for (t = 0; t < DD_USED_NUM; t++)
                        u += dd->dd_phys->dd_used_breakdown[t];
                ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
        }
        if (needlock)
                mutex_exit(&dd->dd_lock);

        if (dd->dd_parent != NULL) {
                dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
                    accounted_delta, compressed, uncompressed, tx);
                dsl_dir_transfer_space(dd->dd_parent,
                    used - accounted_delta,
                    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
        }
}
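
/*
 * Worked example (not in the original): if a child with dd_reserved =
 * 100 grows from used = 80 to used = 120, parent_delta() yields an
 * accounted_delta of 20, so the parent's DD_USED_CHILD grows by 20 and
 * the remaining used - accounted_delta = 20 bytes are shifted in the
 * parent from DD_USED_CHILD_RSRV to DD_USED_CHILD by
 * dsl_dir_transfer_space(), reflecting reservation now backed by real
 * usage.
 */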

void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
        boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(oldtype < DD_USED_NUM);
        ASSERT(newtype < DD_USED_NUM);

        if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
                return;

        dsl_dir_dirty(dd, tx);
        if (needlock)
                mutex_enter(&dd->dd_lock);
        ASSERT(delta > 0 ?
            dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
            dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
        ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
        dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
        dd->dd_phys->dd_used_breakdown[newtype] += delta;
        if (needlock)
                mutex_exit(&dd->dd_lock);
}

static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        uint64_t *quotap = arg2;
        uint64_t new_quota = *quotap;
        int err = 0;
        uint64_t towrite;

        if (new_quota == 0)
                return (0);

        mutex_enter(&dd->dd_lock);
        /*
         * If we are doing the preliminary check in open context, and
         * there are pending changes, then don't fail it, since the
         * pending changes could under-estimate the amount of space to be
         * freed up.
         */
        towrite = dsl_dir_space_towrite(dd);
        if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
            (new_quota < dd->dd_phys->dd_reserved ||
            new_quota < dd->dd_phys->dd_used_bytes + towrite)) {
                err = ENOSPC;
        }
        mutex_exit(&dd->dd_lock);
        return (err);
}

static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        uint64_t *quotap = arg2;
        uint64_t new_quota = *quotap;

        dmu_buf_will_dirty(dd->dd_dbuf, tx);

        mutex_enter(&dd->dd_lock);
        dd->dd_phys->dd_quota = new_quota;
        mutex_exit(&dd->dd_lock);

        spa_history_internal_log(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
            tx, cr, "%lld dataset = %llu ",
            (longlong_t)new_quota, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_quota(const char *ddname, uint64_t quota)
{
        dsl_dir_t *dd;
        int err;

        err = dsl_dir_open(ddname, FTAG, &dd, NULL);
        if (err)
                return (err);

        if (quota != dd->dd_phys->dd_quota) {
                /*
                 * If someone removes a file, then tries to set the quota, we
                 * want to make sure the file freeing takes effect.
                 */
                txg_wait_open(dd->dd_pool, 0);

                err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
                    dsl_dir_set_quota_sync, dd, &quota, 0);
        }

        dsl_dir_close(dd, FTAG);
        return (err);
}
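
/*
 * Illustrative (not in the original): a hypothetical caller sets a 10G
 * quota by name; dsl_sync_task_do() runs dsl_dir_set_quota_check() in
 * open context and again in syncing context before
 * dsl_dir_set_quota_sync() commits the new value.
 */
#if 0
        err = dsl_dir_set_quota("tank/home", 10ULL << 30);
#endif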

static int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        uint64_t *reservationp = arg2;
        uint64_t new_reservation = *reservationp;
        uint64_t used, avail;

        /*
         * If we are doing the preliminary check in open context, the
         * space estimates may be inaccurate.
         */
        if (!dmu_tx_is_syncing(tx))
                return (0);

        mutex_enter(&dd->dd_lock);
        used = dd->dd_phys->dd_used_bytes;
        mutex_exit(&dd->dd_lock);

        if (dd->dd_parent) {
                avail = dsl_dir_space_available(dd->dd_parent,
                    NULL, 0, FALSE);
        } else {
                avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
        }

        if (MAX(used, new_reservation) > MAX(used, dd->dd_phys->dd_reserved)) {
                uint64_t delta = MAX(used, new_reservation) -
                    MAX(used, dd->dd_phys->dd_reserved);

                if (delta > avail)
                        return (ENOSPC);
                if (dd->dd_phys->dd_quota > 0 &&
                    new_reservation > dd->dd_phys->dd_quota)
                        return (ENOSPC);
        }

        return (0);
}

static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        uint64_t *reservationp = arg2;
        uint64_t new_reservation = *reservationp;
        uint64_t used;
        int64_t delta;

        dmu_buf_will_dirty(dd->dd_dbuf, tx);

        mutex_enter(&dd->dd_lock);
        used = dd->dd_phys->dd_used_bytes;
        delta = MAX(used, new_reservation) -
            MAX(used, dd->dd_phys->dd_reserved);
        dd->dd_phys->dd_reserved = new_reservation;

        if (dd->dd_parent != NULL) {
                /* Roll up this additional usage into our ancestors */
                dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
                    delta, 0, 0, tx);
        }
        mutex_exit(&dd->dd_lock);

        spa_history_internal_log(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
            tx, cr, "%lld dataset = %llu",
            (longlong_t)new_reservation, dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_set_reservation(const char *ddname, uint64_t reservation)
{
        dsl_dir_t *dd;
        int err;

        err = dsl_dir_open(ddname, FTAG, &dd, NULL);
        if (err)
                return (err);

        err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
            dsl_dir_set_reservation_sync, dd, &reservation, 0);
        dsl_dir_close(dd, FTAG);
        return (err);
}

static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
        for (; ds1; ds1 = ds1->dd_parent) {
                dsl_dir_t *dd;
                for (dd = ds2; dd; dd = dd->dd_parent) {
                        if (ds1 == dd)
                                return (dd);
                }
        }
        return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
        if (dd == ancestor)
                return (delta);

        mutex_enter(&dd->dd_lock);
        delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
        mutex_exit(&dd->dd_lock);
        return (would_change(dd->dd_parent, delta, ancestor));
}
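
/*
 * Worked example (not in the original): freeing 50 bytes (delta = -50)
 * from a dir with used = 120 and dd_reserved = 100 propagates only
 * MAX(70, 100) - MAX(120, 100) = -20 to its parent, since the
 * reservation absorbs the rest; that -20 then recurses up toward
 * ancestor.
 */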

struct renamearg {
        dsl_dir_t *newparent;
        const char *mynewname;
};

/* ARGSUSED */
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        struct renamearg *ra = arg2;
        dsl_pool_t *dp = dd->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        int err;
        uint64_t val;

        /* There should be 2 references: the open and the dirty */
        if (dmu_buf_refcount(dd->dd_dbuf) > 2)
                return (EBUSY);

        /* check for existing name */
        err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
            ra->mynewname, 8, 1, &val);
        if (err == 0)
                return (EEXIST);
        if (err != ENOENT)
                return (err);

        if (ra->newparent != dd->dd_parent) {
                /* is there enough space? */
                uint64_t myspace =
                    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

                /* no rename into our descendant */
                if (closest_common_ancestor(dd, ra->newparent) == dd)
                        return (EINVAL);

                if (err = dsl_dir_transfer_possible(dd->dd_parent,
                    ra->newparent, myspace))
                        return (err);
        }

        return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        struct renamearg *ra = arg2;
        dsl_pool_t *dp = dd->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        int err;

        ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

        if (ra->newparent != dd->dd_parent) {
                dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
                    -dd->dd_phys->dd_used_bytes,
                    -dd->dd_phys->dd_compressed_bytes,
                    -dd->dd_phys->dd_uncompressed_bytes, tx);
                dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
                    dd->dd_phys->dd_used_bytes,
                    dd->dd_phys->dd_compressed_bytes,
                    dd->dd_phys->dd_uncompressed_bytes, tx);

                if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
                        uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
                            dd->dd_phys->dd_used_bytes;

                        dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
                            -unused_rsrv, 0, 0, tx);
                        dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
                            unused_rsrv, 0, 0, tx);
                }
        }

        dmu_buf_will_dirty(dd->dd_dbuf, tx);

        /* remove from old parent zapobj */
        err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
            dd->dd_myname, tx);
        ASSERT3U(err, ==, 0);

        (void) strcpy(dd->dd_myname, ra->mynewname);
        dsl_dir_close(dd->dd_parent, dd);
        dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
        VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
            ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

        /* add to new parent zapobj */
        err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
            dd->dd_myname, 8, 1, &dd->dd_object, tx);
        ASSERT3U(err, ==, 0);

        spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa,
            tx, cr, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}

int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
        struct renamearg ra;
        int err;

        /* new parent should exist */
        err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
        if (err)
                return (err);

        /* can't rename to different pool */
        if (dd->dd_pool != ra.newparent->dd_pool) {
                err = ENXIO;
                goto out;
        }

        /* new name should not already exist */
        if (ra.mynewname == NULL) {
                err = EEXIST;
                goto out;
        }

        err = dsl_sync_task_do(dd->dd_pool,
            dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
        dsl_dir_close(ra.newparent, FTAG);
        return (err);
}

int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
        dsl_dir_t *ancestor;
        int64_t adelta;
        uint64_t avail;

        ancestor = closest_common_ancestor(sdd, tdd);
        adelta = would_change(sdd, -space, ancestor);
        avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
        if (avail < space)
                return (ENOSPC);

        return (0);
}