+static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
+
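+/*
+ * Register the callback that computes user/group space deltas for a
+ * given objset type. Each type registers at most one callback, keyed
+ * by dmu_objset_type_t; e.g. the ZPL would register its handler (such
+ * as a zfs_space_delta_cb) for DMU_OST_ZFS datasets.
+ */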
+void
+dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
+{
+ used_cbs[ost] = cb;
+}
+
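+/*
+ * Space accounting is possible only when the pool is at
+ * SPA_VERSION_USERSPACE or later, a callback has been registered for
+ * this objset type, and the objset has a userused dnode.
+ */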
+boolean_t
+dmu_objset_userused_enabled(objset_impl_t *os)
+{
+ return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
+ used_cbs[os->os_phys->os_type] &&
+ os->os_userused_dnode);
+}
+
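+/*
+ * Runs in syncing context: walks the dnodes synced in this txg and
+ * hands each one's old and new bonus buffers and space usage to the
+ * registered callback, which charges the deltas to the appropriate
+ * user and group accounting entries.
+ */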
+void
+dmu_objset_do_userquota_callbacks(objset_impl_t *os, dmu_tx_t *tx)
+{
+ dnode_t *dn;
+ list_t *list = &os->os_synced_dnodes;
+ static const char zerobuf[DN_MAX_BONUSLEN] = {0};
+
+ ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
+
+ while ((dn = list_head(list)) != NULL) {
+ dmu_object_type_t bonustype;
+
+ ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
+ ASSERT(dn->dn_oldphys);
+ ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
+ dn->dn_phys->dn_flags &
+ DNODE_FLAG_USERUSED_ACCOUNTED);
+
+ /* Allocate the user/groupused objects if necessary. */
+ if (os->os_userused_dnode->dn_type == DMU_OT_NONE) {
+ VERIFY(0 == zap_create_claim(&os->os,
+ DMU_USERUSED_OBJECT,
+ DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
+ VERIFY(0 == zap_create_claim(&os->os,
+ DMU_GROUPUSED_OBJECT,
+ DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
+ }
+
+ /*
+ * If the object was not previously
+ * accounted, pretend that it was free.
+ */
+ if (!(dn->dn_oldphys->dn_flags &
+ DNODE_FLAG_USERUSED_ACCOUNTED)) {
+ bzero(dn->dn_oldphys, sizeof (dnode_phys_t));
+ }
+
+ /*
+ * If the object was freed, use the previous bonustype.
+ */
+ bonustype = dn->dn_phys->dn_bonustype ?
+ dn->dn_phys->dn_bonustype : dn->dn_oldphys->dn_bonustype;
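+ /*
+ * If the object is free in either its old or new state,
+ * the bonus buffer and space usage for that state must
+ * already be zero.
+ */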
+ ASSERT(dn->dn_phys->dn_type != 0 ||
+ (bcmp(DN_BONUS(dn->dn_phys), zerobuf,
+ DN_MAX_BONUSLEN) == 0 &&
+ DN_USED_BYTES(dn->dn_phys) == 0));
+ ASSERT(dn->dn_oldphys->dn_type != 0 ||
+ (bcmp(DN_BONUS(dn->dn_oldphys), zerobuf,
+ DN_MAX_BONUSLEN) == 0 &&
+ DN_USED_BYTES(dn->dn_oldphys) == 0));
+ used_cbs[os->os_phys->os_type](&os->os, bonustype,
+ DN_BONUS(dn->dn_oldphys), DN_BONUS(dn->dn_phys),
+ DN_USED_BYTES(dn->dn_oldphys),
+ DN_USED_BYTES(dn->dn_phys), tx);
+
+ /*
+ * The mutex is needed here for interlock with dnode_allocate.
+ */
+ mutex_enter(&dn->dn_mtx);
+ zio_buf_free(dn->dn_oldphys, sizeof (dnode_phys_t));
+ dn->dn_oldphys = NULL;
+ mutex_exit(&dn->dn_mtx);
+
+ list_remove(list, dn);
+ dnode_rele(dn, list);
+ }
+}
+
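+/*
+ * Returns B_TRUE once every object in the objset has been accounted
+ * for, i.e. once OBJSET_FLAG_USERACCOUNTING_COMPLETE has been set by
+ * dmu_objset_userspace_upgrade() (or at objset creation).
+ */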
+boolean_t
+dmu_objset_userspace_present(objset_t *os)
+{
+ return (os->os->os_phys->os_flags &
+ OBJSET_FLAG_USERACCOUNTING_COMPLETE);
+}
+
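+/*
+ * Bring user/group accounting up to date for a pre-existing objset by
+ * dirtying every object so its usage is recomputed as it syncs out.
+ * Returns EINTR if interrupted by a signal, ENOTSUP if accounting is
+ * not enabled, and EINVAL for snapshots.
+ */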
+int
+dmu_objset_userspace_upgrade(objset_t *os)
+{
+ uint64_t obj;
+ int err = 0;
+
+ if (dmu_objset_userspace_present(os))
+ return (0);
+ if (!dmu_objset_userused_enabled(os->os))
+ return (ENOTSUP);
+ if (dmu_objset_is_snapshot(os))
+ return (EINVAL);
+
+ /*
+ * We simply need to mark every object dirty, so that it will be
+ * synced out and thus accounted. If this is called
+ * concurrently, or if we already did some work before crashing,
+ * that's fine, since we track each object's accounted state
+ * independently.
+ */
+
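+ /*
+ * Dirty each object in its own transaction so the upgrade makes
+ * incremental progress and can be interrupted at any point.
+ */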
+ for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
+ dmu_tx_t *tx = dmu_tx_create(os);
+ dmu_buf_t *db;
+ int objerr;
+
+ if (issig(JUSTLOOKING) && issig(FORREAL)) {
+ dmu_tx_abort(tx);
+ return (EINTR);
+ }
+
+ objerr = dmu_bonus_hold(os, obj, FTAG, &db);
+ if (objerr) {
+ dmu_tx_abort(tx);
+ continue;
+ }
+ dmu_tx_hold_bonus(tx, obj);
+ objerr = dmu_tx_assign(tx, TXG_WAIT);
+ if (objerr) {
+ dmu_buf_rele(db, FTAG);
+ dmu_tx_abort(tx);
+ continue;
+ }
+ dmu_buf_will_dirty(db, tx);
+ dmu_buf_rele(db, FTAG);
+ dmu_tx_commit(tx);
+ }
+
+ os->os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
+ txg_wait_synced(dmu_objset_pool(os), 0);
+ return (0);
+}
+