Illumos #1644, #1645, #1646, #1647, #1708
module/zfs/dmu_send.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";

/*
 * The list of data whose inclusion in a send stream can be pending from
 * one call to backup_cb to another.  Multiple calls to dump_free() and
 * dump_freeobjects() can be aggregated into a single DRR_FREE or
 * DRR_FREEOBJECTS replay record.
 */
typedef enum {
        PENDING_NONE,
        PENDING_FREE,
        PENDING_FREEOBJECTS
} pendop_t;
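
/*
 * For example, dump_free(ba, obj, 0, 4096) followed by
 * dump_free(ba, obj, 4096, 4096) leaves a single pending DRR_FREE
 * covering bytes 0-8191 of obj; the record is only pushed to the
 * stream when a non-adjacent or differently-typed operation arrives,
 * or when the stream is finalized.
 */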

struct backuparg {
        dmu_replay_record_t *drr;       /* scratch record buffer */
        vnode_t *vp;                    /* vnode the stream is written to */
        offset_t *off;                  /* current stream offset */
        objset_t *os;                   /* objset being sent */
        zio_cksum_t zc;                 /* running fletcher-4 checksum */
        uint64_t toguid;                /* GUID of the snapshot being sent */
        int err;                        /* last write error, if any */
        pendop_t pending_op;            /* aggregation state (see above) */
};

static int
dump_bytes(struct backuparg *ba, void *buf, int len)
{
        ssize_t resid; /* have to get resid to get detailed errno */
        ASSERT3U(len % 8, ==, 0);

        fletcher_4_incremental_native(buf, len, &ba->zc);
        ba->err = vn_rdwr(UIO_WRITE, ba->vp,
            (caddr_t)buf, len,
            0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
        *ba->off += len;
        return (ba->err);
}

static int
dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
    uint64_t length)
{
        struct drr_free *drrf = &(ba->drr->drr_u.drr_free);

        /*
         * If there is a pending op, but it's not PENDING_FREE, push it out,
         * since free block aggregation can only be done for blocks of the
         * same type (i.e., DRR_FREE records can only be aggregated with
         * other DRR_FREE records, and DRR_FREEOBJECTS records can only be
         * aggregated with other DRR_FREEOBJECTS records).
         */
        if (ba->pending_op != PENDING_NONE && ba->pending_op != PENDING_FREE) {
                if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                        return (EINTR);
                ba->pending_op = PENDING_NONE;
        }

        if (ba->pending_op == PENDING_FREE) {
                /*
                 * There should never be a PENDING_FREE if length is -1
                 * (because dump_dnode is the only place where this
                 * function is called with a -1, and only after flushing
                 * any pending record).
                 */
                ASSERT(length != -1ULL);
                /*
                 * Check to see whether this free block can be aggregated
                 * with the pending one.
                 */
                if (drrf->drr_object == object && drrf->drr_offset +
                    drrf->drr_length == offset) {
                        drrf->drr_length += length;
                        return (0);
                } else {
                        /* not a continuation.  Push out pending record */
                        if (dump_bytes(ba, ba->drr,
                            sizeof (dmu_replay_record_t)) != 0)
                                return (EINTR);
                        ba->pending_op = PENDING_NONE;
                }
        }
        /* create a FREE record and make it pending */
        bzero(ba->drr, sizeof (dmu_replay_record_t));
        ba->drr->drr_type = DRR_FREE;
        drrf->drr_object = object;
        drrf->drr_offset = offset;
        drrf->drr_length = length;
        drrf->drr_toguid = ba->toguid;
        if (length == -1ULL) {
                if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                        return (EINTR);
        } else {
                ba->pending_op = PENDING_FREE;
        }

        return (0);
}
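
/*
 * Note: a length of -1ULL means "free to the end of the object";
 * dump_dnode() is the only caller that passes it, and such a record is
 * written out immediately rather than left pending, since nothing can
 * be aggregated after it.
 */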

static int
dump_data(struct backuparg *ba, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
        struct drr_write *drrw = &(ba->drr->drr_u.drr_write);

        /*
         * If there is any kind of pending aggregation (currently either
         * a grouping of free objects or free blocks), push it out to
         * the stream, since aggregation can't be done across operations
         * of different types.
         */
        if (ba->pending_op != PENDING_NONE) {
                if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                        return (EINTR);
                ba->pending_op = PENDING_NONE;
        }
        /* write a DATA record */
        bzero(ba->drr, sizeof (dmu_replay_record_t));
        ba->drr->drr_type = DRR_WRITE;
        drrw->drr_object = object;
        drrw->drr_type = type;
        drrw->drr_offset = offset;
        drrw->drr_length = blksz;
        drrw->drr_toguid = ba->toguid;
        drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
        if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
                drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
        DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
        DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
        DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
        drrw->drr_key.ddk_cksum = bp->blk_cksum;

        if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                return (EINTR);
        if (dump_bytes(ba, data, blksz) != 0)
                return (EINTR);
        return (0);
}
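
/*
 * Note: the block's checksum and size properties are captured in
 * drr_key above so that a deduplicating consumer of the stream can
 * recognize identical blocks; DRR_CHECKSUM_DEDUP is only set when the
 * on-disk checksum function is strong enough (ci_dedup) to be used
 * for that purpose.
 */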

static int
dump_spill(struct backuparg *ba, uint64_t object, int blksz, void *data)
{
        struct drr_spill *drrs = &(ba->drr->drr_u.drr_spill);

        if (ba->pending_op != PENDING_NONE) {
                if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                        return (EINTR);
                ba->pending_op = PENDING_NONE;
        }

        /* write a SPILL record */
        bzero(ba->drr, sizeof (dmu_replay_record_t));
        ba->drr->drr_type = DRR_SPILL;
        drrs->drr_object = object;
        drrs->drr_length = blksz;
        drrs->drr_toguid = ba->toguid;

        if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                return (EINTR);
        if (dump_bytes(ba, data, blksz) != 0)
                return (EINTR);
        return (0);
}

static int
dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
{
        struct drr_freeobjects *drrfo = &(ba->drr->drr_u.drr_freeobjects);

        /*
         * If there is a pending op, but it's not PENDING_FREEOBJECTS,
         * push it out, since free block aggregation can only be done for
         * blocks of the same type (i.e., DRR_FREE records can only be
         * aggregated with other DRR_FREE records, and DRR_FREEOBJECTS
         * records can only be aggregated with other DRR_FREEOBJECTS
         * records).
         */
        if (ba->pending_op != PENDING_NONE &&
            ba->pending_op != PENDING_FREEOBJECTS) {
                if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                        return (EINTR);
                ba->pending_op = PENDING_NONE;
        }
        if (ba->pending_op == PENDING_FREEOBJECTS) {
                /*
                 * See whether this free object array can be aggregated
                 * with the pending one.
                 */
                if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
                        drrfo->drr_numobjs += numobjs;
                        return (0);
                } else {
                        /* can't be aggregated.  Push out pending record */
                        if (dump_bytes(ba, ba->drr,
                            sizeof (dmu_replay_record_t)) != 0)
                                return (EINTR);
                        ba->pending_op = PENDING_NONE;
                }
        }

        /* write a FREEOBJECTS record */
        bzero(ba->drr, sizeof (dmu_replay_record_t));
        ba->drr->drr_type = DRR_FREEOBJECTS;
        drrfo->drr_firstobj = firstobj;
        drrfo->drr_numobjs = numobjs;
        drrfo->drr_toguid = ba->toguid;

        ba->pending_op = PENDING_FREEOBJECTS;

        return (0);
}

static int
dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
{
        struct drr_object *drro = &(ba->drr->drr_u.drr_object);

        if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
                return (dump_freeobjects(ba, object, 1));

        if (ba->pending_op != PENDING_NONE) {
                if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                        return (EINTR);
                ba->pending_op = PENDING_NONE;
        }

        /* write an OBJECT record */
        bzero(ba->drr, sizeof (dmu_replay_record_t));
        ba->drr->drr_type = DRR_OBJECT;
        drro->drr_object = object;
        drro->drr_type = dnp->dn_type;
        drro->drr_bonustype = dnp->dn_bonustype;
        drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
        drro->drr_bonuslen = dnp->dn_bonuslen;
        drro->drr_checksumtype = dnp->dn_checksum;
        drro->drr_compress = dnp->dn_compress;
        drro->drr_toguid = ba->toguid;

        if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
                return (EINTR);

        if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
                return (EINTR);

        /* free anything past the end of the file */
        if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
            (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
                return (EINTR);
        if (ba->err)
                return (EINTR);
        return (0);
}
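
/*
 * For example, with 128K data blocks (dn_datablkszsec == 256) and
 * dn_maxblkid == 9, the dump_free() call above emits a DRR_FREE for
 * everything from offset 10 * 128K to the end of the object
 * (length -1ULL).
 */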

#define BP_SPAN(dnp, level) \
        (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
        (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))

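/*
 * For example, with 128K data blocks (dn_datablkszsec == 256), 16K
 * indirect blocks (dn_indblkshift == 14) and 128-byte block pointers
 * (SPA_BLKPTRSHIFT == 7), BP_SPAN(dnp, 1) == 256 << (9 + 7) == 16M,
 * i.e. each level-1 block pointer covers 128 data blocks.
 */

/*
 * Callback for traverse_dataset(), invoked in pre-order: holes
 * (bp == NULL) become DRR_FREE or DRR_FREEOBJECTS records, meta-dnode
 * blocks are decomposed into per-object DRR_OBJECT records, spill
 * blocks become DRR_SPILL records, and level-0 data blocks become
 * DRR_WRITE records.
 */
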
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
        struct backuparg *ba = arg;
        dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
        int err = 0;

        if (issig(JUSTLOOKING) && issig(FORREAL))
                return (EINTR);

        if (zb->zb_object != DMU_META_DNODE_OBJECT &&
            DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
                return (0);
        } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
                uint64_t span = BP_SPAN(dnp, zb->zb_level);
                uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
                err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
        } else if (bp == NULL) {
                uint64_t span = BP_SPAN(dnp, zb->zb_level);
                err = dump_free(ba, zb->zb_object, zb->zb_blkid * span, span);
        } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
                return (0);
        } else if (type == DMU_OT_DNODE) {
                dnode_phys_t *blk;
                int i;
                int blksz = BP_GET_LSIZE(bp);
                uint32_t aflags = ARC_WAIT;
                arc_buf_t *abuf;

                if (dsl_read(NULL, spa, bp, pbuf,
                    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
                    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
                        return (EIO);

                blk = abuf->b_data;
                for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
                        uint64_t dnobj = (zb->zb_blkid <<
                            (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
                        err = dump_dnode(ba, dnobj, blk+i);
                        if (err)
                                break;
                }
                (void) arc_buf_remove_ref(abuf, &abuf);
        } else if (type == DMU_OT_SA) {
                uint32_t aflags = ARC_WAIT;
                arc_buf_t *abuf;
                int blksz = BP_GET_LSIZE(bp);

                if (arc_read_nolock(NULL, spa, bp,
                    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
                    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
                        return (EIO);

                err = dump_spill(ba, zb->zb_object, blksz, abuf->b_data);
                (void) arc_buf_remove_ref(abuf, &abuf);
        } else { /* it's a level-0 block of a regular object */
                uint32_t aflags = ARC_WAIT;
                arc_buf_t *abuf;
                int blksz = BP_GET_LSIZE(bp);

                if (dsl_read(NULL, spa, bp, pbuf,
                    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
                    ZIO_FLAG_CANFAIL, &aflags, zb) != 0) {
                        if (zfs_send_corrupt_data) {
                                uint64_t *ptr;
                                /* Send a block filled with 0x"zfs badd bloc" */
                                abuf = arc_buf_alloc(spa, blksz, &abuf,
                                    ARC_BUFC_DATA);
                                for (ptr = abuf->b_data;
                                    (char *)ptr < (char *)abuf->b_data + blksz;
                                    ptr++)
                                        *ptr = 0x2f5baddb10c;
                        } else {
                                return (EIO);
                        }
                }

                err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz,
                    blksz, bp, abuf->b_data);
                (void) arc_buf_remove_ref(abuf, &abuf);
        }

        ASSERT(err == 0 || err == EINTR);
        return (err);
}

int
dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    vnode_t *vp, offset_t *off)
{
        dsl_dataset_t *ds = tosnap->os_dsl_dataset;
        dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
        dmu_replay_record_t *drr;
        struct backuparg ba;
        int err;
        uint64_t fromtxg = 0;

        /* tosnap must be a snapshot */
        if (ds->ds_phys->ds_next_snap_obj == 0)
                return (EINVAL);

        /* fromsnap must be an earlier snapshot from the same fs as tosnap */
        if (fromds && (ds->ds_dir != fromds->ds_dir ||
            fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
                return (EXDEV);

        if (fromorigin) {
                dsl_pool_t *dp = ds->ds_dir->dd_pool;

                if (fromsnap)
                        return (EINVAL);

                if (dsl_dir_is_clone(ds->ds_dir)) {
                        rw_enter(&dp->dp_config_rwlock, RW_READER);
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
                        rw_exit(&dp->dp_config_rwlock);
                        if (err)
                                return (err);
                } else {
                        fromorigin = B_FALSE;
                }
        }

        drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
        drr->drr_type = DRR_BEGIN;
        drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
        DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
            DMU_SUBSTREAM);

#ifdef _KERNEL
        if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
                uint64_t version;
                if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
                        /* don't leak the header record on early return */
                        kmem_free(drr, sizeof (dmu_replay_record_t));
                        return (EINVAL);
                }
                if (version == ZPL_VERSION_SA) {
                        DMU_SET_FEATUREFLAGS(
                            drr->drr_u.drr_begin.drr_versioninfo,
                            DMU_BACKUP_FEATURE_SA_SPILL);
                }
        }
#endif

        drr->drr_u.drr_begin.drr_creation_time =
            ds->ds_phys->ds_creation_time;
        drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
        if (fromorigin)
                drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
        drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
        if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
                drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

        if (fromds)
                drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
        dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

        if (fromds)
                fromtxg = fromds->ds_phys->ds_creation_txg;
        if (fromorigin)
                dsl_dataset_rele(fromds, FTAG);

        ba.drr = drr;
        ba.vp = vp;
        ba.os = tosnap;
        ba.off = off;
        ba.toguid = ds->ds_phys->ds_guid;
        ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);
        ba.pending_op = PENDING_NONE;

        if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
                kmem_free(drr, sizeof (dmu_replay_record_t));
                return (ba.err);
        }

        err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
            backup_cb, &ba);

        if (ba.pending_op != PENDING_NONE)
                if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0)
                        err = EINTR;

        if (err) {
                if (err == EINTR && ba.err)
                        err = ba.err;
                kmem_free(drr, sizeof (dmu_replay_record_t));
                return (err);
        }

        bzero(drr, sizeof (dmu_replay_record_t));
        drr->drr_type = DRR_END;
        drr->drr_u.drr_end.drr_checksum = ba.zc;
        drr->drr_u.drr_end.drr_toguid = ba.toguid;

        if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
                kmem_free(drr, sizeof (dmu_replay_record_t));
                return (ba.err);
        }

        kmem_free(drr, sizeof (dmu_replay_record_t));

        return (0);
}
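
/*
 * The stream produced above is: a DRR_BEGIN record, the records
 * emitted by backup_cb() for each changed block, and a DRR_END record
 * carrying the fletcher-4 checksum of everything that preceded it.
 */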

int
dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    uint64_t *sizep)
{
        dsl_dataset_t *ds = tosnap->os_dsl_dataset;
        dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        int err;
        uint64_t size, recordsize;

        /* tosnap must be a snapshot */
        if (ds->ds_phys->ds_next_snap_obj == 0)
                return (EINVAL);

        /* fromsnap must be an earlier snapshot from the same fs as tosnap */
        if (fromds && (ds->ds_dir != fromds->ds_dir ||
            fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
                return (EXDEV);

        if (fromorigin) {
                if (fromsnap)
                        return (EINVAL);

                if (dsl_dir_is_clone(ds->ds_dir)) {
                        rw_enter(&dp->dp_config_rwlock, RW_READER);
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
                        rw_exit(&dp->dp_config_rwlock);
                        if (err)
                                return (err);
                } else {
                        fromorigin = B_FALSE;
                }
        }

        /* Get uncompressed size estimate of changed data. */
        if (fromds == NULL) {
                size = ds->ds_phys->ds_uncompressed_bytes;
        } else {
                uint64_t used, comp;
                err = dsl_dataset_space_written(fromds, ds,
                    &used, &comp, &size);
                if (fromorigin)
                        dsl_dataset_rele(fromds, FTAG);
                if (err)
                        return (err);
        }

        /*
         * Assume that space (both on-disk and in-stream) is dominated by
         * data.  We will adjust for indirect blocks and the copies property,
         * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
         * records).
         */

        /*
         * Subtract out the approximate space used by indirect blocks.
         * Assume most space is used by data blocks (non-indirect, non-dnode).
         * Assume all blocks are recordsize.  Assume ditto blocks and
         * internal fragmentation counteract compression.
         *
         * Therefore, space used by indirect blocks is sizeof (blkptr_t) per
         * block, which we observe in practice.
         */
        rw_enter(&dp->dp_config_rwlock, RW_READER);
        err = dsl_prop_get_ds(ds, "recordsize",
            sizeof (recordsize), 1, &recordsize, NULL);
        rw_exit(&dp->dp_config_rwlock);
        if (err)
                return (err);
        size -= size / recordsize * sizeof (blkptr_t);

        /* Add in the space for the record associated with each block. */
        size += size / recordsize * sizeof (dmu_replay_record_t);

        *sizep = size;

        return (0);
}
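
/*
 * For example, with recordsize == 128K and 128-byte block pointers,
 * the indirect-block adjustment above removes about 0.1% of the
 * estimate (128 / 131072 per block), and the per-record overhead adds
 * back sizeof (dmu_replay_record_t) for each 128K of data.
 */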

struct recvbeginsyncarg {
        const char *tofs;
        const char *tosnap;
        dsl_dataset_t *origin;
        uint64_t fromguid;
        dmu_objset_type_t type;
        void *tag;
        boolean_t force;
        uint64_t dsflags;
        char clonelastname[MAXNAMELEN];
        dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
        cred_t *cr;
};

/* ARGSUSED */
static int
recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        struct recvbeginsyncarg *rbsa = arg2;
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        uint64_t val;
        int err;

        err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
            strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);

        if (err != ENOENT)
                return (err ? err : EEXIST);

        if (rbsa->origin) {
                /* make sure it's a snap in the same pool */
                if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
                        return (EXDEV);
                if (!dsl_dataset_is_snapshot(rbsa->origin))
                        return (EINVAL);
                if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
                        return (ENODEV);
        }

        return (0);
}

static void
recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dir_t *dd = arg1;
        struct recvbeginsyncarg *rbsa = arg2;
        uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
        uint64_t dsobj;

        /* Create and open new dataset. */
        dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
            rbsa->origin, flags, rbsa->cr, tx);
        VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
            B_TRUE, dmu_recv_tag, &rbsa->ds));

        if (rbsa->origin == NULL) {
                (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
                    rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
        }

        spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
            dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
}

/* ARGSUSED */
static int
recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        struct recvbeginsyncarg *rbsa = arg2;
        int err;
        uint64_t val;

        /* must not have any changes since most recent snapshot */
        if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
                return (ETXTBSY);

        /* new snapshot name must not exist */
        err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
            ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
        if (err == 0)
                return (EEXIST);
        if (err != ENOENT)
                return (err);

        if (rbsa->fromguid) {
                /* if incremental, most recent snapshot must match fromguid */
                if (ds->ds_prev == NULL)
                        return (ENODEV);

                /*
                 * most recent snapshot must match fromguid, or there are no
                 * changes since the fromguid one
                 */
                if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
                        uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
                        uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
                        while (obj != 0) {
                                dsl_dataset_t *snap;
                                err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
                                    obj, FTAG, &snap);
                                if (err)
                                        return (ENODEV);
                                if (snap->ds_phys->ds_creation_txg < birth) {
                                        dsl_dataset_rele(snap, FTAG);
                                        return (ENODEV);
                                }
                                if (snap->ds_phys->ds_guid == rbsa->fromguid) {
                                        dsl_dataset_rele(snap, FTAG);
                                        break; /* it's ok */
                                }
                                obj = snap->ds_phys->ds_prev_snap_obj;
                                dsl_dataset_rele(snap, FTAG);
                        }
                        if (obj == 0)
                                return (ENODEV);
                }
        } else {
                /* if full, most recent snapshot must be $ORIGIN */
                if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
                        return (ENODEV);
        }

        /* temporary clone name must not exist */
        err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
            ds->ds_dir->dd_phys->dd_child_dir_zapobj,
            rbsa->clonelastname, 8, 1, &val);
        if (err == 0)
                return (EEXIST);
        if (err != ENOENT)
                return (err);

        return (0);
}

/* ARGSUSED */
static void
recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *ohds = arg1;
        struct recvbeginsyncarg *rbsa = arg2;
        dsl_pool_t *dp = ohds->ds_dir->dd_pool;
        dsl_dataset_t *cds;
        uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
        uint64_t dsobj;

        /* create and open the temporary clone */
        dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
            ohds->ds_prev, flags, rbsa->cr, tx);
        VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));

        /*
         * If we actually created a non-clone, we need to create the
         * objset in our new dataset.
         */
        if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
                (void) dmu_objset_create_impl(dp->dp_spa,
                    cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
        }

        rbsa->ds = cds;

        spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
            dp->dp_spa, tx, "dataset = %lld", dsobj);
}

static boolean_t
dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
{
        int featureflags;

        featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);

        /* Verify pool version supports SA if SA_SPILL feature set */
        return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
            (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
}
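
/*
 * Note: despite its name, this returns B_TRUE when the stream can NOT
 * be received, i.e., when it requires the SA_SPILL feature but the
 * pool is older than SPA_VERSION_SA; callers fail with ENOTSUP in
 * that case.
 */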

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
    boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
{
        int err = 0;
        boolean_t byteswap;
        struct recvbeginsyncarg rbsa = { 0 };
        uint64_t versioninfo;
        int flags;
        dsl_dataset_t *ds;

        if (drrb->drr_magic == DMU_BACKUP_MAGIC)
                byteswap = FALSE;
        else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
                byteswap = TRUE;
        else
                return (EINVAL);

        rbsa.tofs = tofs;
        rbsa.tosnap = tosnap;
        rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
        rbsa.fromguid = drrb->drr_fromguid;
        rbsa.type = drrb->drr_type;
        rbsa.tag = FTAG;
        rbsa.dsflags = 0;
        rbsa.cr = CRED();
        versioninfo = drrb->drr_versioninfo;
        flags = drrb->drr_flags;

        if (byteswap) {
                rbsa.type = BSWAP_32(rbsa.type);
                rbsa.fromguid = BSWAP_64(rbsa.fromguid);
                versioninfo = BSWAP_64(versioninfo);
                flags = BSWAP_32(flags);
        }

        if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
            rbsa.type >= DMU_OST_NUMTYPES ||
            ((flags & DRR_FLAG_CLONE) && origin == NULL))
                return (EINVAL);

        if (flags & DRR_FLAG_CI_DATA)
                rbsa.dsflags = DS_FLAG_CI_DATASET;

        bzero(drc, sizeof (dmu_recv_cookie_t));
        drc->drc_drrb = drrb;
        drc->drc_tosnap = tosnap;
        drc->drc_top_ds = top_ds;
        drc->drc_force = force;

        /*
         * Process the begin in syncing context.
         */

        /* open the dataset we are logically receiving into */
        err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
        if (err == 0) {
                if (dmu_recv_verify_features(ds, drrb)) {
                        dsl_dataset_rele(ds, dmu_recv_tag);
                        return (ENOTSUP);
                }
                /* target fs already exists; recv into temp clone */

                /* Can't recv a clone into an existing fs */
                if (flags & DRR_FLAG_CLONE) {
                        dsl_dataset_rele(ds, dmu_recv_tag);
                        return (EINVAL);
                }

                /* must not have an incremental recv already in progress */
                if (!mutex_tryenter(&ds->ds_recvlock)) {
                        dsl_dataset_rele(ds, dmu_recv_tag);
                        return (EBUSY);
                }

                /* tmp clone name is: tofs/%tosnap */
                (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
                    "%%%s", tosnap);
                rbsa.force = force;
                err = dsl_sync_task_do(ds->ds_dir->dd_pool,
                    recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
                if (err) {
                        mutex_exit(&ds->ds_recvlock);
                        dsl_dataset_rele(ds, dmu_recv_tag);
                        return (err);
                }
                drc->drc_logical_ds = ds;
                drc->drc_real_ds = rbsa.ds;
        } else if (err == ENOENT) {
                /* target fs does not exist; must be a full backup or clone */
                char *cp;

                /*
                 * If it's a non-clone incremental, we are missing the
                 * target fs, so fail the recv.
                 */
                if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
                        return (ENOENT);

                /* Open the parent of tofs */
                cp = strrchr(tofs, '/');
                *cp = '\0';
                err = dsl_dataset_hold(tofs, FTAG, &ds);
                *cp = '/';
                if (err)
                        return (err);

                if (dmu_recv_verify_features(ds, drrb)) {
                        dsl_dataset_rele(ds, FTAG);
                        return (ENOTSUP);
                }

                err = dsl_sync_task_do(ds->ds_dir->dd_pool,
                    recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
                dsl_dataset_rele(ds, FTAG);
                if (err)
                        return (err);
                drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
                drc->drc_newfs = B_TRUE;
        }

        return (err);
}

struct restorearg {
        int err;
        int byteswap;
        vnode_t *vp;
        char *buf;
        uint64_t voff;
        int bufsize; /* amount of memory allocated for buf */
        zio_cksum_t cksum;
        avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
        uint64_t        guid;
        dsl_dataset_t   *gme_ds;
        avl_node_t      avlnode;
} guid_map_entry_t;
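
/*
 * Each guid_map_entry_t maps a dataset GUID found in the stream to the
 * dataset it was received into; restore_write_byref() uses this map to
 * locate data that arrived earlier in a dedup'ed stream.
 */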

static int
guid_compare(const void *arg1, const void *arg2)
{
        const guid_map_entry_t *gmep1 = arg1;
        const guid_map_entry_t *gmep2 = arg2;

        if (gmep1->guid < gmep2->guid)
                return (-1);
        else if (gmep1->guid > gmep2->guid)
                return (1);
        return (0);
}

static void
free_guid_map_onexit(void *arg)
{
        avl_tree_t *ca = arg;
        void *cookie = NULL;
        guid_map_entry_t *gmep;

        while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
                dsl_dataset_rele(gmep->gme_ds, ca);
                kmem_free(gmep, sizeof (guid_map_entry_t));
        }
        avl_destroy(ca);
        kmem_free(ca, sizeof (avl_tree_t));
}

static void *
restore_read(struct restorearg *ra, int len)
{
        void *rv;
        int done = 0;

        /* some things will require 8-byte alignment, so everything must */
        ASSERT3U(len % 8, ==, 0);

        while (done < len) {
                ssize_t resid;

                ra->err = vn_rdwr(UIO_READ, ra->vp,
                    (caddr_t)ra->buf + done, len - done,
                    ra->voff, UIO_SYSSPACE, FAPPEND,
                    RLIM64_INFINITY, CRED(), &resid);

                if (resid == len - done)
                        ra->err = EINVAL;
                ra->voff += len - done - resid;
                done = len - resid;
                if (ra->err)
                        return (NULL);
        }

        ASSERT3U(done, ==, len);
        rv = ra->buf;
        if (ra->byteswap)
                fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
        else
                fletcher_4_incremental_native(rv, len, &ra->cksum);
        return (rv);
}
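
/*
 * Note: on failure restore_read() sets ra->err and returns NULL; on
 * success every byte read is folded into ra->cksum, which is later
 * compared against the checksum stored in the stream's DRR_END record.
 */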

noinline static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
        drr->drr_type = BSWAP_32(drr->drr_type);
        drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
        switch (drr->drr_type) {
        case DRR_BEGIN:
                DO64(drr_begin.drr_magic);
                DO64(drr_begin.drr_versioninfo);
                DO64(drr_begin.drr_creation_time);
                DO32(drr_begin.drr_type);
                DO32(drr_begin.drr_flags);
                DO64(drr_begin.drr_toguid);
                DO64(drr_begin.drr_fromguid);
                break;
        case DRR_OBJECT:
                DO64(drr_object.drr_object);
                /* DO64(drr_object.drr_allocation_txg); */
                DO32(drr_object.drr_type);
                DO32(drr_object.drr_bonustype);
                DO32(drr_object.drr_blksz);
                DO32(drr_object.drr_bonuslen);
                DO64(drr_object.drr_toguid);
                break;
        case DRR_FREEOBJECTS:
                DO64(drr_freeobjects.drr_firstobj);
                DO64(drr_freeobjects.drr_numobjs);
                DO64(drr_freeobjects.drr_toguid);
                break;
        case DRR_WRITE:
                DO64(drr_write.drr_object);
                DO32(drr_write.drr_type);
                DO64(drr_write.drr_offset);
                DO64(drr_write.drr_length);
                DO64(drr_write.drr_toguid);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
                DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
                DO64(drr_write.drr_key.ddk_prop);
                break;
        case DRR_WRITE_BYREF:
                DO64(drr_write_byref.drr_object);
                DO64(drr_write_byref.drr_offset);
                DO64(drr_write_byref.drr_length);
                DO64(drr_write_byref.drr_toguid);
                DO64(drr_write_byref.drr_refguid);
                DO64(drr_write_byref.drr_refobject);
                DO64(drr_write_byref.drr_refoffset);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
                DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
                DO64(drr_write_byref.drr_key.ddk_prop);
                break;
        case DRR_FREE:
                DO64(drr_free.drr_object);
                DO64(drr_free.drr_offset);
                DO64(drr_free.drr_length);
                DO64(drr_free.drr_toguid);
                break;
        case DRR_SPILL:
                DO64(drr_spill.drr_object);
                DO64(drr_spill.drr_length);
                DO64(drr_spill.drr_toguid);
                break;
        case DRR_END:
                DO64(drr_end.drr_checksum.zc_word[0]);
                DO64(drr_end.drr_checksum.zc_word[1]);
                DO64(drr_end.drr_checksum.zc_word[2]);
                DO64(drr_end.drr_checksum.zc_word[3]);
                DO64(drr_end.drr_toguid);
                break;
        default:
                break;
        }
#undef DO64
#undef DO32
}

noinline static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
        int err;
        dmu_tx_t *tx;
        void *data = NULL;

        if (drro->drr_type == DMU_OT_NONE ||
            drro->drr_type >= DMU_OT_NUMTYPES ||
            drro->drr_bonustype >= DMU_OT_NUMTYPES ||
            drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
            drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
            P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
            drro->drr_blksz < SPA_MINBLOCKSIZE ||
            drro->drr_blksz > SPA_MAXBLOCKSIZE ||
            drro->drr_bonuslen > DN_MAX_BONUSLEN) {
                return (EINVAL);
        }

        err = dmu_object_info(os, drro->drr_object, NULL);

        if (err != 0 && err != ENOENT)
                return (EINVAL);

        if (drro->drr_bonuslen) {
                data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
                if (ra->err)
                        return (ra->err);
        }

        if (err == ENOENT) {
                /* currently free, want to be allocated */
                tx = dmu_tx_create(os);
                dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
                err = dmu_tx_assign(tx, TXG_WAIT);
                if (err) {
                        dmu_tx_abort(tx);
                        return (err);
                }
                err = dmu_object_claim(os, drro->drr_object,
                    drro->drr_type, drro->drr_blksz,
                    drro->drr_bonustype, drro->drr_bonuslen, tx);
                dmu_tx_commit(tx);
        } else {
                /* currently allocated, want to be allocated */
                err = dmu_object_reclaim(os, drro->drr_object,
                    drro->drr_type, drro->drr_blksz,
                    drro->drr_bonustype, drro->drr_bonuslen);
        }
        if (err) {
                return (EINVAL);
        }

        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, drro->drr_object);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err) {
                dmu_tx_abort(tx);
                return (err);
        }

        dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
            tx);
        dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

        if (data != NULL) {
                dmu_buf_t *db;

                VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
                dmu_buf_will_dirty(db, tx);

                ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
                bcopy(data, db->db_data, drro->drr_bonuslen);
                if (ra->byteswap) {
                        dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
                            drro->drr_bonuslen);
                }
                dmu_buf_rele(db, FTAG);
        }
        dmu_tx_commit(tx);
        return (0);
}

/* ARGSUSED */
noinline static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
        uint64_t obj;

        if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
                return (EINVAL);

        for (obj = drrfo->drr_firstobj;
            obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
            (void) dmu_object_next(os, &obj, FALSE, 0)) {
                int err;

                if (dmu_object_info(os, obj, NULL) != 0)
                        continue;

                err = dmu_free_object(os, obj);
                if (err)
                        return (err);
        }
        return (0);
}

noinline static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
        dmu_tx_t *tx;
        void *data;
        int err;

        if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
            drrw->drr_type >= DMU_OT_NUMTYPES)
                return (EINVAL);

        data = restore_read(ra, drrw->drr_length);
        if (data == NULL)
                return (ra->err);

        if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
                return (EINVAL);

        tx = dmu_tx_create(os);

        dmu_tx_hold_write(tx, drrw->drr_object,
            drrw->drr_offset, drrw->drr_length);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err) {
                dmu_tx_abort(tx);
                return (err);
        }
        if (ra->byteswap)
                dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
        dmu_write(os, drrw->drr_object,
            drrw->drr_offset, drrw->drr_length, data, tx);
        dmu_tx_commit(tx);
        return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
        dmu_tx_t *tx;
        int err;
        guid_map_entry_t gmesrch;
        guid_map_entry_t *gmep;
        avl_index_t     where;
        objset_t *ref_os = NULL;
        dmu_buf_t *dbp;

        if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
                return (EINVAL);

        /*
         * If the GUID of the referenced dataset is different from the
         * GUID of the target dataset, find the referenced dataset.
         */
        if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
                gmesrch.guid = drrwbr->drr_refguid;
                if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
                    &where)) == NULL) {
                        return (EINVAL);
                }
                if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
                        return (EINVAL);
        } else {
                ref_os = os;
        }

        err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
            drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
        if (err)
                return (err);

        tx = dmu_tx_create(os);

        dmu_tx_hold_write(tx, drrwbr->drr_object,
            drrwbr->drr_offset, drrwbr->drr_length);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err) {
                dmu_tx_abort(tx);
                return (err);
        }
        dmu_write(os, drrwbr->drr_object,
            drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
        dmu_buf_rele(dbp, FTAG);
        dmu_tx_commit(tx);
        return (0);
}
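
/*
 * For example, in a dedup'ed stream the first copy of a block arrives
 * as an ordinary DRR_WRITE; each duplicate then arrives as a
 * DRR_WRITE_BYREF naming the dataset GUID, object, and offset of that
 * first copy, which is re-read via dmu_buf_hold() above and written
 * to the new location.
 */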

static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
        dmu_tx_t *tx;
        void *data;
        dmu_buf_t *db, *db_spill;
        int err;

        if (drrs->drr_length < SPA_MINBLOCKSIZE ||
            drrs->drr_length > SPA_MAXBLOCKSIZE)
                return (EINVAL);

        data = restore_read(ra, drrs->drr_length);
        if (data == NULL)
                return (ra->err);

        if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
                return (EINVAL);

        VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
        if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
                dmu_buf_rele(db, FTAG);
                return (err);
        }

        tx = dmu_tx_create(os);

        dmu_tx_hold_spill(tx, db->db_object);

        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err) {
                dmu_buf_rele(db, FTAG);
                dmu_buf_rele(db_spill, FTAG);
                dmu_tx_abort(tx);
                return (err);
        }
        dmu_buf_will_dirty(db_spill, tx);

        if (db_spill->db_size < drrs->drr_length)
                VERIFY(0 == dbuf_spill_set_blksz(db_spill,
                    drrs->drr_length, tx));
        bcopy(data, db_spill->db_data, drrs->drr_length);

        dmu_buf_rele(db, FTAG);
        dmu_buf_rele(db_spill, FTAG);

        dmu_tx_commit(tx);
        return (0);
}

/* ARGSUSED */
noinline static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
        int err;

        if (drrf->drr_length != -1ULL &&
            drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
                return (EINVAL);

        if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
                return (EINVAL);

        err = dmu_free_long_range(os, drrf->drr_object,
            drrf->drr_offset, drrf->drr_length);
        return (err);
}
1335
1336 /*
1337  * NB: callers *must* call dmu_recv_end() if this succeeds.
1338  */
1339 int
1340 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
1341     int cleanup_fd, uint64_t *action_handlep)
1342 {
1343         struct restorearg ra = { 0 };
1344         dmu_replay_record_t *drr;
1345         objset_t *os;
1346         zio_cksum_t pcksum;
1347         int featureflags;
1348
1349         if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1350                 ra.byteswap = TRUE;
1351
1352         {
1353                 /* compute checksum of drr_begin record */
1354                 dmu_replay_record_t *drr;
1355                 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1356
1357                 drr->drr_type = DRR_BEGIN;
1358                 drr->drr_u.drr_begin = *drc->drc_drrb;
1359                 if (ra.byteswap) {
1360                         fletcher_4_incremental_byteswap(drr,
1361                             sizeof (dmu_replay_record_t), &ra.cksum);
1362                 } else {
1363                         fletcher_4_incremental_native(drr,
1364                             sizeof (dmu_replay_record_t), &ra.cksum);
1365                 }
1366                 kmem_free(drr, sizeof (dmu_replay_record_t));
1367         }
1368
1369         if (ra.byteswap) {
1370                 struct drr_begin *drrb = drc->drc_drrb;
1371                 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1372                 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1373                 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1374                 drrb->drr_type = BSWAP_32(drrb->drr_type);
1375                 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1376                 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1377         }
1378
1379         ra.vp = vp;
1380         ra.voff = *voffp;
1381         ra.bufsize = 1<<20;
1382         ra.buf = vmem_alloc(ra.bufsize, KM_SLEEP);
1383
1384         /* these were verified in dmu_recv_begin */
1385         ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
1386             DMU_SUBSTREAM);
1387         ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);
1388
1389         /*
1390          * Open the objset we are modifying.
1391          */
1392         VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);
1393
1394         ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
1395
1396         featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1397
1398         /* if this stream is dedup'ed, set up the avl tree for guid mapping */
1399         if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
1400                 minor_t minor;
1401
1402                 if (cleanup_fd == -1) {
1403                         ra.err = EBADF;
1404                         goto out;
1405                 }
1406                 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
1407                 if (ra.err) {
1408                         cleanup_fd = -1;
1409                         goto out;
1410                 }
1411
1412                 if (*action_handlep == 0) {
1413                         ra.guid_to_ds_map =
1414                             kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
1415                         avl_create(ra.guid_to_ds_map, guid_compare,
1416                             sizeof (guid_map_entry_t),
1417                             offsetof(guid_map_entry_t, avlnode));
1418                         ra.err = zfs_onexit_add_cb(minor,
1419                             free_guid_map_onexit, ra.guid_to_ds_map,
1420                             action_handlep);
1421                         if (ra.err)
1422                                 goto out;
1423                 } else {
1424                         ra.err = zfs_onexit_cb_data(minor, *action_handlep,
1425                             (void **)&ra.guid_to_ds_map);
1426                         if (ra.err)
1427                                 goto out;
1428                 }
1429
1430                 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
1431         }
1432
1433         /*
1434          * Read records and process them.
1435          */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = EINTR;
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = ECKSUM;
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = EINVAL;
			goto out;
		}
		pcksum = ra.cksum;
	}
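	/*
	 * A well-formed stream ends with a DRR_END record, which jumps
	 * to out above.  Falling out of the loop therefore means
	 * restore_read() failed (error or truncated stream), so ra.err
	 * must be set here.
	 */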
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);

		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
		    B_FALSE);
		if (drc->drc_real_ds != drc->drc_logical_ds) {
			mutex_exit(&drc->drc_logical_ds->ds_recvlock);
			dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
		}
	}

	vmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}

struct recvendsyncarg {
	char *tosnap;
	uint64_t creation_time;
	uint64_t toguid;
};

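/*
 * recv_end_check() and recv_end_sync() run as a dsl_sync_task pair:
 * the check validates that the snapshot can be created, and the sync
 * creates it, stamps the creation time and guid carried in the
 * stream's begin record onto the new snapshot, and clears
 * DS_FLAG_INCONSISTENT now that the receive is complete.
 */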
static int
recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
}

static void
recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);

	/* set snapshot's creation time and guid */
	dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
	ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
	ds->ds_prev->ds_phys->ds_guid = resa->toguid;
	ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
}

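/*
 * Record the snapshot just created by the receive in the stream's guid
 * map, keyed by the guid the sender assigned it, so that DRR_WRITE_BYREF
 * records in later substreams of a dedup'ed package can locate the
 * referenced data.  The hold taken here is released when the map is
 * torn down by the on-exit callback registered in dmu_recv_stream().
 */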
static int
add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
	if (err == 0) {
		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
	}

	rw_exit(&dp->dp_config_rwlock);
	return (err);
}

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	struct recvendsyncarg resa;
	dsl_dataset_t *ds = drc->drc_logical_ds;
	int err, myerr;

	/*
	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
	 * can close it.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);

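	/*
	 * The stream was received into drc_real_ds, a hidden clone of
	 * the existing dataset.  Take ownership of the target and swap
	 * the received contents into place; if someone else owns it,
	 * abandon the receive and report EBUSY.
	 */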
	if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
		err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
		    drc->drc_force);
		if (err)
			goto out;
	} else {
		mutex_exit(&ds->ds_recvlock);
		dsl_dataset_rele(ds, dmu_recv_tag);
		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
		    B_FALSE);
		return (EBUSY);
	}

	resa.creation_time = drc->drc_drrb->drr_creation_time;
	resa.toguid = drc->drc_drrb->drr_toguid;
	resa.tosnap = drc->drc_tosnap;

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    recv_end_check, recv_end_sync, ds, &resa, 3);
	if (err) {
		/* swap back */
		(void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
	}

out:
	mutex_exit(&ds->ds_recvlock);
	if (err == 0 && drc->drc_guid_to_ds_map != NULL)
		(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
	dsl_dataset_disown(ds, dmu_recv_tag);
	myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
	ASSERT3U(myerr, ==, 0);
	return (err);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	struct recvendsyncarg resa;
	dsl_dataset_t *ds = drc->drc_logical_ds;
	int err;

	/*
	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
	 * can close it.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);

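	/*
	 * Unlike the existing-dataset case, the stream was received
	 * directly into the new dataset, so no clone swap is needed;
	 * just snapshot it in place.
	 */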
	resa.creation_time = drc->drc_drrb->drr_creation_time;
	resa.toguid = drc->drc_drrb->drr_toguid;
	resa.tosnap = drc->drc_tosnap;

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    recv_end_check, recv_end_sync, ds, &resa, 3);
	if (err) {
		/* clean up the fs we just recv'd into */
		(void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
	} else {
		if (drc->drc_guid_to_ds_map != NULL)
			(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
		/* release the hold from dmu_recv_begin */
		dsl_dataset_disown(ds, dmu_recv_tag);
	}
	return (err);
}

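/*
 * dmu_recv_begin() sets drc_logical_ds != drc_real_ds exactly when the
 * stream is being received into a hidden clone of an existing dataset;
 * dispatch to the matching completion path.
 */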
int
dmu_recv_end(dmu_recv_cookie_t *drc)
{
	if (drc->drc_logical_ds != drc->drc_real_ds)
		return (dmu_recv_existing_end(drc));
	else
		return (dmu_recv_new_end(drc));
}
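
/*
 * Illustrative calling sequence (a sketch, not the verbatim ioctl
 * plumbing): a receive is driven by dmu_recv_begin(), then
 * dmu_recv_stream(), then dmu_recv_end().  Arguments elided with "..."
 * here are filled in by the zfs_ioc_recv() path.
 *
 *	dmu_recv_cookie_t drc;
 *	int err;
 *
 *	err = dmu_recv_begin(..., &drc);
 *	if (err == 0)
 *		err = dmu_recv_stream(&drc, vp, &voff, cleanup_fd,
 *		    &action_handle);
 *	if (err == 0)
 *		err = dmu_recv_end(&drc);
 */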