 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include "fs/fs_subr.h"
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work. To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory. The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done, avoiding races, using ZFS_ENTER(zsb).
 *	A ZFS_EXIT(zsb) is needed before all returns. Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory. Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes. Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx then use iput_async().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system. The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks. This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zsb);			// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);	// wait for the next open txg
 *			dmu_tx_abort(tx);
 *			goto top;		// then retry from the top
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zsb);		// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zsb);			// finished in zfs
 *	return (error);			// done, report error
 */

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	int64_t off;
	void *pb;
	int nbytes;
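
	/*
	 * Walk the range one page-cache page at a time; for each mapped
	 * page found, refresh its contents from the DMU buffer so the
	 * mapped view matches the data that was just written.
	 */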
	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		nbytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			pb = kmap(pp);
			(void) dmu_read(os, oid, start+off, nbytes, pb+off,
			    DMU_READ_PREFETCH);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			unlock_page(pp);
			page_cache_release(pp);
		}

		len -= nbytes;
		off = 0;
	}
}

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default to the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	znode_t *zp = ITOZ(ip);
	objset_t *os = ITOZSB(ip)->z_os;
	int64_t start, off;
	uint64_t bytes;
	int len = nbytes;
	int error = 0;
	void *pb;
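
	/*
	 * Satisfy the read one page-cache page at a time: prefer the
	 * data in an up-to-date mapped page, and fall back to
	 * dmu_read_uio() for any page not resident in the page cache.
	 */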
	start = uio->uio_loffset;
	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		bytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
		if (pp) {
			ASSERT(PageUptodate(pp));

			pb = kmap(pp);
			error = uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			unlock_page(pp);
			page_cache_release(pp);
		} else {
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
		}

		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
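
/*
 * Upper bound on how many bytes zfs_read() moves per loop iteration;
 * larger reads are carved into zfs_read_chunk_size pieces.
 */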
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	objset_t *os;
	ssize_t n, nbytes;
	int error = 0;
	rl_t *rl;
#ifdef HAVE_UIO_ZEROCOPY
	xuio_t *xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);
	os = zsb->z_os;

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zsb);
		return (EACCES);
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zsb);
		return (0);
	}

#ifdef HAVE_MANDLOCKS
	/*
	 * Check for mandatory locks
	 */
	if (MANDMODE(zp->z_mode)) {
		if (error = chklock(ip, FREAD,
		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}
#endif /* HAVE_MANDLOCKS */

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zsb->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if (ISP2(blksz)) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}
#endif /* HAVE_UIO_ZEROCOPY */

	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT))
			error = mappedread(ip, nbytes, uio);
		else
			error = dmu_read_uio(os, zp->z_id, uio, nbytes);

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}

		n -= nbytes;
	}
out:
	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_read);

/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	rlim64_t limit = uio->uio_limit;
	ssize_t start_resid = uio->uio_resid;
	zfs_sb_t *zsb = ZTOZSB(zp);
	int max_blksz = zsb->z_max_blksz;
	iovec_t *aiov = NULL;
	iovec_t *iovp = uio->uio_iov;
	sa_bulk_attr_t bulk[4];
	uint64_t mtime[2], ctime[2];
	ASSERTV(int iovcnt = uio->uio_iovcnt);

	/*
	 * Fasttrack empty write
	 */
	n = start_resid;
	if (n == 0)
		return (0);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	zilog = zsb->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

#ifdef HAVE_MANDLOCKS
	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (MANDMODE((mode_t)zp->z_mode) &&
	    (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}
#endif /* HAVE_MANDLOCKS */

#ifdef HAVE_UIO_ZEROCOPY
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up the txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
		uio_prefaultpages(MIN(n, max_blksz), uio);
#endif /* HAVE_UIO_ZEROCOPY */

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics. We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
		woff = rl->r_off;
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zsb);
		return (EFBIG);
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);

	/*
	 * Write the file in reasonable size chunks. Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
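	/*
	 * For example (illustrative numbers): with a 128K block size, a
	 * 200K write starting at offset 100K is carved into chunks of
	 * 28K (up to the next block boundary), then 128K, then 44K, each
	 * assigned to its own tx.
	 */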
	while (n > 0) {
		woff = uio->uio_loffset;
again:
		if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
		    zfs_owner_overquota(zsb, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = EDQUOT;
			break;
		}

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block. "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction. This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zsb->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto again;
			}
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range. This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf(). Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zsb->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}

		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT))
			update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id);

		/*
		 * If we made no progress, we're done. If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non-zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zsb->z_replay && zsb->z_replay_eof != 0)
			zp->z_size = zsb->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0)
			uio_prefaultpages(MIN(n, max_blksz), uio);
	}

	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zsb->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_write);
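
/*
 * Release an inode reference, deferring the final iput() to a taskq
 * when ours may be the last reference. This keeps the potentially
 * heavy zfs_zinactive() work out of contexts (a held tx, or a txg
 * stopped from syncing) where it could block or deadlock.
 */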
static void
iput_async(struct inode *ip, taskq_t *taskq)
{
	ASSERT(atomic_read(&ip->i_count) > 0);
	if (atomic_read(&ip->i_count) == 1)
		taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_SLEEP);
	else
		iput(ip);
}

static void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;
	objset_t *os = ZTOZSB(zp)->z_os;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
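
/*
 * Fault-injection aid used when testing the ZIL: if set, a simulated
 * I/O error is reported instead of the real dmu_sync() result.
 */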
static int zil_fault_io = 0;

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zfs_sb_t *zsb = arg;
	objset_t *os = zsb->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zsb, object, &zp) != 0)
		return (ENOENT);
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
		return (ENOENT);
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zsb->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = ENOENT;
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
			    RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_range_unlock(zgd->zgd_rl);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = ENOENT;
		if (error == 0) {
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);
			if (error == 0) {
				zgd->zgd_db = db;
				zgd->zgd_bp = bp;

				ASSERT(db->db_offset == offset);
				ASSERT(db->db_size == size);

				error = dmu_sync(zio, lr->lr_common.lrc_txg,
				    zfs_get_done, zgd);
				ASSERT(error || lr->lr_length <= zp->z_blksz);

				/*
				 * On success, we need to wait for the write
				 * I/O initiated by dmu_sync() to complete
				 * before we can release this dbuf. We will
				 * finish everything up in the zfs_get_done()
				 * callback.
				 */
				if (error == 0)
					return (0);

				if (error == EALREADY) {
					lr->lr_common.lrc_txtype = TX_WRITE2;
					error = 0;
				}
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
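
/*
 * Check whether the caller has the requested access to a file: ACE
 * mask bits are checked with zfs_zaccess(), traditional rwx mode bits
 * with zfs_zaccess_rwx().
 */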
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	int error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_access);

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp	- returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	NA
 */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	int error = 0;

	/* fast path */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
		if (!S_ISDIR(dip->i_mode)) {
			return (ENOTDIR);
		} else if (zdp->z_sa_hdl == NULL) {
			return (EIO);
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*ipp = dip;
				igrab(*ipp);
				return (0);
			}
			return (error);
#ifdef HAVE_DNLC
		} else {
			vnode_t *tvp = dnlc_lookup(dvp, nm);

			if (tvp) {
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (error) {
					iput(tvp);
					return (error);
				}
				if (tvp == DNLC_NO_VNODE) {
					iput(tvp);
					return (ENOENT);
				}
				*vpp = tvp;
				return (specvp_check(vpp, cr));
			}
#endif /* HAVE_DNLC */
		}
	}

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zdp);

	*ipp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * If the xattr property is off, refuse the lookup request.
		 */
		if (!(zsb->z_flags & ZSB_XATTR_USER)) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}

		/*
		 * We don't allow recursive attributes...
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
			ZFS_EXIT(zsb);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			iput(*ipp);
			*ipp = NULL;
		}

		ZFS_EXIT(zsb);
		return (error);
	}

	if (!S_ISDIR(dip->i_mode)) {
		ZFS_EXIT(zsb);
		return (ENOTDIR);
	}

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_lookup);

/*
 * Attempt to create a new entry in a directory. If the entry
 * already exists, truncate the file if permissible, else return
 * an error. Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated if new entry created
 *	ip - ctime|mtime always, atime if new
 */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	boolean_t have_acl = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (EINVAL);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);

	if (zsb->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}

top:
	*ipp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = EISDIR;
			ZFS_EXIT(zsb);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = EINVAL;
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);
			error = EDQUOT;
			goto out;
		}

		tx = dmu_tx_create(os);
		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);
		fuid_dirtied = zsb->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zsb, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zsb->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zsb);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		if (fuid_dirtied)
			zfs_fuid_sync(zsb, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = EEXIST;
			goto out;
		}

		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = EISDIR;
			goto out;
		}

		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);
			dl = NULL;
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_create);

/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime
 *	ip - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;

int
zfs_remove(struct inode *dip, char *name, cred_t *cr)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	uint64_t xattr_obj_unlinked = 0;
	pathname_t *realnmp = NULL;
#ifdef HAVE_PN_UTILS
	pathname_t realnm;
#endif /* HAVE_PN_UTILS */

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		(void) pn_alloc(&realnm);
		realnmp = &realnm;
	}
#endif /* HAVE_PN_UTILS */

top:
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
#ifdef HAVE_PN_UTILS
		if (realnmp)
			pn_free(realnmp);
#endif /* HAVE_PN_UTILS */
		ZFS_EXIT(zsb);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {
		error = EPERM;
		goto out;
	}

#ifdef HAVE_DNLC
	if (realnmp)
		dnlc_remove(dvp, realnmp->pn_buf);
	else
		dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

	/*
	 * We never delete the znode and always place it in the unlinked
	 * set. The dentry cache will always hold the last reference and
	 * is responsible for safely freeing the znode.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zsb, xattr_obj, &xzp);
		ASSERT3U(error, ==, 0);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		iput(ip);
		if (xzp)
			iput(ZTOI(xzp));
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
#ifdef HAVE_PN_UTILS
		if (realnmp)
			pn_free(realnmp);
#endif /* HAVE_PN_UTILS */
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed. Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
#endif /* HAVE_PN_UTILS */
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
#ifdef HAVE_PN_UTILS
	if (realnmp)
		pn_free(realnmp);
#endif /* HAVE_PN_UTILS */

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	if (xzp)
		zfs_inode_update(xzp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_remove);

/*
 * Create a new directory and insert it into dip using the name
 * provided. Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
int
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	gid_t gid = crgetgid(cr);
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	uid = crgetuid(cr);
	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (EINVAL);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	if (zsb->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}
	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
top:
	*ipp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zsb);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (EDQUOT);
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	*ipp = ZTOI(zp);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_mkdir);

/*
 * Remove a directory subdir entry. If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dip	- inode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 */
int
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
    int flags)
{
	znode_t *dzp = ITOZ(dip);
	znode_t *zp;
	struct inode *ip;
	zfs_sb_t *zsb = ITOZSB(dip);
	int zflg = ZEXISTS;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	if (!S_ISDIR(ip->i_mode)) {
		error = ENOTDIR;
		goto out;
	}

	if (ip == cwd) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		iput(ip);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	iput(ip);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_rmdir);

/*
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *	IN:	ip	- inode of directory to read.
 *		dirent	- buffer for directory entries.
 *
 *	OUT:	dirent	- filler buffer of directory entries.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by the ZAP are always
 * zero. This allows us to use the low range for "special" directory
 * entries: We use 0 for '.', and 1 for '..'. If this is the root of the
 * filesystem, we use the offset 2 for the '.zfs' directory.
 */
int
zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
    loff_t *pos, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	objset_t *os;
	zap_cursor_t zc;
	zap_attribute_t zap;
	int error;
	uint8_t prefetch;
	int done = 0;
	uint64_t parent;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);
	os = zsb->z_os;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent))) != 0)
		goto out;

	/*
	 * Quit if directory has been removed (posix)
	 */
	error = 0;
	if (zp->z_unlinked)
		goto out;

	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (*pos <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, *pos);
	}

	/*
	 * Transform to file-system independent format
	 */
	while (!done) {
		uint64_t objnum;
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (*pos == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
		} else if (*pos == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
		} else if (*pos == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
		} else {
			/*
			 * Grab next entry.
			 */
			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)
					break;
				else
					goto update;
			}

			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers != 1) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)*pos);
				error = ENXIO;
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
		}

		done = filldir(dirent, zap.za_name, strlen(zap.za_name),
		    zap_cursor_serialize(&zc), objnum, 0);
		if (done)
			break;

		/* Prefetch znode */
		if (prefetch)
			dmu_prefetch(os, objnum, 0, 0);

		if (*pos > 2 || (*pos == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			*pos = zap_cursor_serialize(&zc);
		} else {
			(*pos)++;
		}
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

update:
	zap_cursor_fini(&zc);
	if (error == ENOENT)
		error = 0;

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);

out:
	ZFS_EXIT(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_readdir);
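
/*
 * Hint stored in thread-specific data by zfs_fsync(); lower layers can
 * use it to recognize threads that are calling fsync frequently.
 */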
ulong_t zfs_fsync_sync_cnt = 4;
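
/*
 * Push any dirty, synchronous data for this file out to stable storage
 * by committing its ZIL entries (a no-op when synchronous semantics
 * are disabled on the dataset).
 */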
int
zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);
	}

	return (0);
}
EXPORT_SYMBOL(zfs_fsync);

/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	ip	- inode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If ATTR_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds)
 */
int
zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	int error = 0;
	uint64_t links;
	uint64_t mtime[2], ctime[2];
	xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
	xoptattr_t *xoap = NULL;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (vap->va_uid != crgetuid(cr))) {
		if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
		    skipaclchk, cr))) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}

	/*
	 * Return all attributes. It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */

	mutex_enter(&zp->z_lock);
	vap->va_type = vn_mode_to_vtype(zp->z_mode);
	vap->va_mode = zp->z_mode;
	vap->va_nodeid = zp->z_id;
	if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
		links = zp->z_links + 1;
	else
		links = zp->z_links;
	vap->va_nlink = MIN(links, ZFS_LINK_MAX);
	vap->va_size = i_size_read(ip);
	vap->va_rdev = ip->i_rdev;
	vap->va_seq = ip->i_generation;

	/*
	 * Add in any requested optional attributes and the create time.
	 * Also set the corresponding bits in the returned attribute bitmap.
	 */
	if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) {
		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
			xoap->xoa_archive =
			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
			XVA_SET_RTN(xvap, XAT_ARCHIVE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
			xoap->xoa_readonly =
			    ((zp->z_pflags & ZFS_READONLY) != 0);
			XVA_SET_RTN(xvap, XAT_READONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
			xoap->xoa_system =
			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
			XVA_SET_RTN(xvap, XAT_SYSTEM);
		}

		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
			xoap->xoa_hidden =
			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
			XVA_SET_RTN(xvap, XAT_HIDDEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			xoap->xoa_nounlink =
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
			XVA_SET_RTN(xvap, XAT_NOUNLINK);
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			xoap->xoa_immutable =
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			xoap->xoa_appendonly =
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
			XVA_SET_RTN(xvap, XAT_APPENDONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			xoap->xoa_nodump =
			    ((zp->z_pflags & ZFS_NODUMP) != 0);
			XVA_SET_RTN(xvap, XAT_NODUMP);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
			xoap->xoa_opaque =
			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
			XVA_SET_RTN(xvap, XAT_OPAQUE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			xoap->xoa_av_quarantined =
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			xoap->xoa_av_modified =
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
		    S_ISREG(ip->i_mode)) {
			zfs_sa_get_scanstamp(zp, xvap);
		}

		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
			uint64_t times[2];

			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb),
			    times, sizeof (times));
			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
			XVA_SET_RTN(xvap, XAT_CREATETIME);
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_REPARSE);
		}
		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
			xoap->xoa_generation = zp->z_gen;
			XVA_SET_RTN(xvap, XAT_GEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
			xoap->xoa_offline =
			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
			XVA_SET_RTN(xvap, XAT_OFFLINE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
			xoap->xoa_sparse =
			    ((zp->z_pflags & ZFS_SPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_SPARSE);
		}
	}

	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
	ZFS_TIME_DECODE(&vap->va_ctime, ctime);

	mutex_exit(&zp->z_lock);

	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);

	if (zp->z_blksz == 0) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		vap->va_blksize = zsb->z_max_blksz;
	}

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_getattr);

/*
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 *	IN:	ip	- inode of file to be modified.
 *		vap	- new attribute values.
 *			  If ATTR_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime updated, mtime updated if size changed.
 */
int
zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	dmu_tx_t *tx;
	vattr_t oldva;
	xvattr_t tmpxvattr;
	uint_t mask = vap->va_mask;
	uint_t saved_mask;
	int trim_mask = 0;
	uint64_t new_mode;
	uint64_t new_uid, new_gid;
	uint64_t xattr_obj;
	uint64_t mtime[2], ctime[2];
	znode_t *attrzp;
	int need_policy = FALSE;
	int err, err2;
	zfs_fuid_info_t *fuidp = NULL;
	xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
	xoptattr_t *xoap;
	zfs_acl_t *aclp;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t fuid_dirtied = B_FALSE;
	sa_bulk_attr_t bulk[7], xattr_bulk[7];
	int count = 0, xattr_count = 0;

	if (mask == 0)
		return (0);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	/*
	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	 */
	if (zsb->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & ATTR_XVATTR))) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
		ZFS_EXIT(zsb);
		return (EISDIR);
	}

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	/*
	 * If this is an xvattr_t, then get a pointer to the structure of
	 * optional attributes. If this is NULL, then we have a vattr_t.
	 */
	xoap = xva_getxoptattr(xvap);

	xva_init(&tmpxvattr);

	/*
	 * Immutable files can only alter immutable bit and atime
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
	    ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
	    ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	/*
	 * Verify that the timestamps don't overflow 32 bits.
	 * ZFS can handle large timestamps, but 32bit syscalls can't
	 * handle times greater than 2039. This check should be removed
	 * once large timestamps are fully supported.
	 */
	if (mask & (ATTR_ATIME | ATTR_MTIME)) {
		if (((mask & ATTR_ATIME) &&
		    TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & ATTR_MTIME) &&
		    TIMESPEC_OVERFLOW(&vap->va_mtime))) {
			ZFS_EXIT(zsb);
			return (EOVERFLOW);
		}
	}

top:
	attrzp = NULL;
	aclp = NULL;

	/* Can this be moved to before the top label? */
	if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
		ZFS_EXIT(zsb);
		return (EROFS);
	}

	/*
	 * First validate permissions
	 */

	if (mask & ATTR_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}
		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}

		/* Careful: vmtruncate() returns a negative Linux errno */
		err = -vmtruncate(ip, vap->va_size);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}
	}

	if (mask & (ATTR_ATIME|ATTR_MTIME) ||
	    ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
		    skipaclchk, cr);
	}

	if (mask & (ATTR_UID|ATTR_GID)) {
		int idmask = (mask & (ATTR_UID|ATTR_GID));
		int take_owner;
		int take_group;

		/*
		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		 */

		if (!(mask & ATTR_MODE))
			vap->va_mode = zp->z_mode;

		/*
		 * Take ownership or chgrp to group we are a member of
		 */

		take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & ATTR_GID) &&
		    zfs_groupmember(zsb, vap->va_gid, cr);

		/*
		 * If both ATTR_UID and ATTR_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 * ownership.
		 *
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		 */

		if (((idmask == (ATTR_UID|ATTR_GID)) &&
		    take_owner && take_group) ||
		    ((idmask == ATTR_UID) && take_owner) ||
		    ((idmask == ATTR_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				/*
				 * Remove setuid/setgid for non-privileged users
				 */
				(void) secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (ATTR_UID|ATTR_GID));
			} else {
				need_policy = TRUE;
			}
		} else {
			need_policy = TRUE;
		}
	}

	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & ATTR_XVATTR) {
		/*
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 *
		 * The bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		 */
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((!S_ISREG(ip->i_mode) &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);
			ZFS_EXIT(zsb);
			return (EPERM);
		}

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
			need_policy = TRUE;
		}
	}

	mutex_exit(&zp->z_lock);

	if (mask & ATTR_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(ip, vap,
			    &oldva, cr);
			if (err) {
				ZFS_EXIT(zsb);
				return (err);
			}
			trim_mask |= ATTR_MODE;
		} else {
			need_policy = TRUE;
		}
	}

	if (need_policy) {
		/*
		 * If trim_mask is set then take ownership
		 * has been granted or write_acl is present and user
		 * has the ability to modify mode. In that case remove
		 * UID|GID and/or MODE from mask so that
		 * secpolicy_vnode_setattr() doesn't revoke it.
		 */

		if (trim_mask) {
			saved_mask = vap->va_mask;
			vap->va_mask &= ~trim_mask;
		}
		err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}

		if (trim_mask)
			vap->va_mask |= saved_mask;
	}

	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 * changed va_mask
	 */
	mask = vap->va_mask;

	if ((mask & (ATTR_UID | ATTR_GID))) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
			if (err)
				goto out2;
		}
		if (mask & ATTR_UID) {
			new_uid = zfs_fuid_create(zsb,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_uid != zp->z_uid &&
			    zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {
				if (attrzp)
					iput(ZTOI(attrzp));
				err = EDQUOT;
				goto out2;
			}
		}

		if (mask & ATTR_GID) {
			new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
			    cr, ZFS_GROUP, &fuidp);
			if (new_gid != zp->z_gid &&
			    zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {
				if (attrzp)
					iput(ZTOI(attrzp));
				err = EDQUOT;
				goto out2;
			}
		}
	}
	tx = dmu_tx_create(zsb->z_os);

	if (mask & ATTR_MODE) {
		uint64_t pmode = zp->z_mode;
		uint64_t acl_obj;

		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		zfs_acl_chmod_setattr(zp, &aclp, new_mode);

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 * to V1 format?
			 */
			if (zsb->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				    DMU_OBJECT_END);
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
			} else {
				dmu_tx_hold_write(tx, acl_obj, 0,
				    aclp->z_acl_bytes);
			}
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		}
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	} else {
		if ((mask & ATTR_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	}

	if (attrzp) {
		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
	}

	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err) {
		if (err == ERESTART)
			dmu_tx_wait(tx);
		goto out;
	}

	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 *
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	 */

	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (attrzp) {
		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));
	}

	if (mask & (ATTR_UID|ATTR_GID)) {

		if (mask & ATTR_UID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
			    &new_uid, sizeof (new_uid));
			zp->z_uid = new_uid;
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zsb), NULL, &new_uid,
				    sizeof (new_uid));
				attrzp->z_uid = new_uid;
			}
		}

		if (mask & ATTR_GID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
			    NULL, &new_gid, sizeof (new_gid));
			zp->z_gid = new_gid;
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_GID(zsb), NULL, &new_gid,
				    sizeof (new_gid));
				attrzp->z_gid = new_gid;
			}
		}
		if (!(mask & ATTR_MODE)) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
			    NULL, &new_mode, sizeof (new_mode));
			new_mode = zp->z_mode;
		}
		err = zfs_acl_chown_setattr(zp);
		ASSERT(err == 0);
		if (attrzp) {
			err = zfs_acl_chown_setattr(attrzp);
			ASSERT(err == 0);
		}
	}

	if (mask & ATTR_MODE) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
		    &new_mode, sizeof (new_mode));
		zp->z_mode = new_mode;
		ASSERT3P(aclp, !=, NULL);
		err = zfs_aclset_common(zp, aclp, cr, tx);
		ASSERT3U(err, ==, 0);
		if (zp->z_acl_cached)
			zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = aclp;
		aclp = NULL;
	}

	if (mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
		    &zp->z_atime, sizeof (zp->z_atime));
	}

	if (mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
		    mtime, sizeof (mtime));
	}

	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
	if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
		    NULL, mtime, sizeof (mtime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);
	} else if (mask != 0) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
		    B_TRUE);
		if (attrzp) {
			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
			    SA_ZPL_CTIME(zsb), NULL,
			    &ctime, sizeof (ctime));
			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
			    mtime, ctime, B_TRUE);
		}
	}

	/*
	 * Do this after setting timestamps to prevent the timestamp
	 * update from toggling the bit.
	 */
	if (xoap && (mask & ATTR_XVATTR)) {

		/*
		 * restore trimmed off masks
		 * so that return masks can be set for caller.
		 */
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
			XVA_SET_REQ(xvap, XAT_APPENDONLY);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
			XVA_SET_REQ(xvap, XAT_NOUNLINK);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
			XVA_SET_REQ(xvap, XAT_NODUMP);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
		}
		if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			ASSERT(S_ISREG(ip->i_mode));

		zfs_xvattr_set(zp, xvap, tx);
	}
2812 zfs_fuid_sync(zsb, tx);
2815 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2817 mutex_exit(&zp->z_lock);
2818 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2819 mutex_exit(&zp->z_acl_lock);
2822 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2823 mutex_exit(&attrzp->z_acl_lock);
2824 mutex_exit(&attrzp->z_lock);
2827 if (err == 0 && attrzp) {
2828 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
2839 zfs_fuid_info_free(fuidp);
2845 if (err == ERESTART)
2848 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2850 zfs_inode_update(zp);
2854 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
2855 zil_commit(zilog, 0);
2860 EXPORT_SYMBOL(zfs_setattr);
2862 typedef struct zfs_zlock {
2863 krwlock_t *zl_rwlock; /* lock we acquired */
2864 znode_t *zl_znode; /* znode we held */
2865 struct zfs_zlock *zl_next; /* next in list */
2869 * Drop locks and release vnodes that were held by zfs_rename_lock().
2872 zfs_rename_unlock(zfs_zlock_t **zlpp)
2876 while ((zl = *zlpp) != NULL) {
2877 if (zl->zl_znode != NULL)
2878 iput(ZTOI(zl->zl_znode));
2879 rw_exit(zl->zl_rwlock);
2880 *zlpp = zl->zl_next;
2881 kmem_free(zl, sizeof (*zl));
2886 * Search back through the directory tree, using the ".." entries.
2887 * Lock each directory in the chain to prevent concurrent renames.
2888 * Fail any attempt to move a directory into one of its own descendants.
2889 * XXX - z_parent_lock can overlap with map or grow locks
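 *
 * A simplified sketch of the walk (locking, allocation, and error
 * handling omitted; see the body for the real logic):
 *
 *	zp = tdzp;
 *	do {
 *		take zp->z_parent_lock and record it in *zlpp;
 *		if (zp->z_id == szp->z_id)
 *			return (EINVAL);	<- descendant of source
 *		if (zp->z_id == rootid)
 *			break;			<- hit the top
 *		read zp's ".." entry (SA_ZPL_PARENT) into oidp;
 *		zp = zfs_zget(oidp);
 *	} while (zp->z_id != sdzp->z_id);
 *
 * Any locks taken along the way are recorded so that
 * zfs_rename_unlock() can undo them in one pass.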
2892 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2896 uint64_t rootid = ZTOZSB(zp)->z_root;
2897 uint64_t oidp = zp->z_id;
2898 krwlock_t *rwlp = &szp->z_parent_lock;
2899 krw_t rw = RW_WRITER;
2902 * First pass write-locks szp and compares to zp->z_id.
2903 * Later passes read-lock zp and compare to zp->z_parent.
2906 if (!rw_tryenter(rwlp, rw)) {
2908 * Another thread is renaming in this path.
2909 * Note that if we are a WRITER, we don't have any
2910 * parent_locks held yet.
2912 if (rw == RW_READER && zp->z_id > szp->z_id) {
2914 * Drop our locks and restart
2916 zfs_rename_unlock(&zl);
2920 rwlp = &szp->z_parent_lock;
2925 * Wait for other thread to drop its locks
2931 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
2932 zl->zl_rwlock = rwlp;
2933 zl->zl_znode = NULL;
2934 zl->zl_next = *zlpp;
2937 if (oidp == szp->z_id) /* We're a descendant of szp */
2940 if (oidp == rootid) /* We've hit the top */
2943 if (rw == RW_READER) { /* i.e. not the first pass */
2944 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
2949 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
2950 &oidp, sizeof (oidp));
2951 rwlp = &zp->z_parent_lock;
2954 } while (zp->z_id != sdzp->z_id);
2960 * Move an entry from the provided source directory to the target
2961 * directory. Change the entry name as indicated.
2963 * IN: sdip - Source directory containing the "old entry".
2964 * snm - Old entry name.
2965 * tdip - Target directory to contain the "new entry".
2966 * tnm - New entry name.
2967 * cr - credentials of caller.
2968 * flags - case flags
2970 * RETURN: 0 if success
2971 * error code if failure
2974 * sdip,tdip - ctime|mtime updated
2978 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
2979 cred_t *cr, int flags)
2981 znode_t *tdzp, *szp, *tzp;
2982 znode_t *sdzp = ITOZ(sdip);
2983 zfs_sb_t *zsb = ITOZSB(sdip);
2985 zfs_dirlock_t *sdl, *tdl;
2988 int cmp, serr, terr;
2993 ZFS_VERIFY_ZP(sdzp);
2996 if (tdip->i_sb != sdip->i_sb) {
3002 ZFS_VERIFY_ZP(tdzp);
3003 if (zsb->z_utf8 && u8_validate(tnm,
3004 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3009 if (flags & FIGNORECASE)
3018 * This is to prevent the creation of links into attribute space
3019 * by renaming a linked file into/out of an attribute directory.
3020 * See the comment in zfs_link() for why this is considered bad.
3022 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3028 * Lock source and target directory entries. To prevent deadlock,
3029 * a lock ordering must be defined. We lock the directory with
3030 * the smallest object id first, or if it's a tie, the one with
3031 * the lexically first name.
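 *
 * For example (object ids are illustrative): a rename with
 * sdzp->z_id == 5 and tdzp->z_id == 9 always locks object 5 first,
 * so a concurrent rename going the other way between the same two
 * directories takes the locks in the same order and cannot
 * deadlock; only when both ids are equal does the lexical name
 * comparison below break the tie.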
3033 if (sdzp->z_id < tdzp->z_id) {
3035 } else if (sdzp->z_id > tdzp->z_id) {
3039 * First compare the two name arguments without
3040 * considering any case folding.
3042 int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);
3044 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3045 ASSERT(error == 0 || !zsb->z_utf8);
3048 * POSIX: "If the old argument and the new argument
3049 * both refer to links to the same existing file,
3050 * the rename() function shall return successfully
3051 * and perform no other action."
3057 * If the file system is case-folding, then we may
3058 * have some more checking to do. A case-folding file
3059 * system either supports mixed case sensitivity
3060 * access or is completely case-insensitive. Note
3061 * that the file system is always case preserving.
3063 * In mixed sensitivity mode case sensitive behavior
3064 * is the default. FIGNORECASE must be used to
3065 * explicitly request case insensitive behavior.
3067 * If the source and target names provided differ only
3068 * by case (e.g., a request to rename 'tim' to 'Tim'),
3069 * we will treat this as a special case in the
3070 * case-insensitive mode: as long as the source name
3071 * is an exact match, we will allow this to proceed as
3072 * a name-change request.
3074 if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
3075 (zsb->z_case == ZFS_CASE_MIXED &&
3076 flags & FIGNORECASE)) &&
3077 u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
3080 * case preserving rename request, require exact name matches.
3089 * If the source and destination directories are the same, we should
3090 * grab the z_name_lock of that directory only once.
3094 rw_enter(&sdzp->z_name_lock, RW_READER);
3098 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3099 ZEXISTS | zflg, NULL, NULL);
3100 terr = zfs_dirent_lock(&tdl,
3101 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3103 terr = zfs_dirent_lock(&tdl,
3104 tdzp, tnm, &tzp, zflg, NULL, NULL);
3105 serr = zfs_dirent_lock(&sdl,
3106 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3112 * Source entry invalid or not there.
3115 zfs_dirent_unlock(tdl);
3121 rw_exit(&sdzp->z_name_lock);
3123 if (strcmp(snm, "..") == 0)
3129 zfs_dirent_unlock(sdl);
3133 rw_exit(&sdzp->z_name_lock);
3135 if (strcmp(tnm, "..") == 0)
3142 * Must have write access at the source to remove the old entry
3143 * and write access at the target to create the new entry.
3144 * Note that if target and source are the same, this can be
3145 * done in a single check.
3148 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3151 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3153 * Check to make sure rename is valid.
3154 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3156 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3161 * Does target exist?
3165 * Source and target must be the same type.
3167 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3168 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3173 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3179 * POSIX dictates that when the source and target
3180 * entries refer to the same file object, rename
3181 * must do nothing and exit without error.
3183 if (szp->z_id == tzp->z_id) {
3189 tx = dmu_tx_create(zsb->z_os);
3190 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3191 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3192 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3193 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3195 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3196 zfs_sa_upgrade_txholds(tx, tdzp);
3199 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3200 zfs_sa_upgrade_txholds(tx, tzp);
3203 zfs_sa_upgrade_txholds(tx, szp);
3204 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
3205 error = dmu_tx_assign(tx, TXG_NOWAIT);
3208 zfs_rename_unlock(&zl);
3209 zfs_dirent_unlock(sdl);
3210 zfs_dirent_unlock(tdl);
3213 rw_exit(&sdzp->z_name_lock);
3218 if (error == ERESTART) {
3228 if (tzp) /* Attempt to remove the existing target */
3229 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3232 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3234 szp->z_pflags |= ZFS_AV_MODIFIED;
3236 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
3237 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3238 ASSERT3U(error, ==, 0);
3240 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3242 zfs_log_rename(zilog, tx, TX_RENAME |
3243 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3244 sdl->dl_name, tdzp, tdl->dl_name, szp);
3247 * At this point, we have successfully created
3248 * the target name, but have failed to remove
3249 * the source name. Since the create was done
3250 * with the ZRENAMING flag, there are
3251 * complications; for one, the link count is
3252 * wrong. The easiest way to deal with this
3253 * is to remove the newly created target, and
3254 * return the original error. This must
3255 * succeed; fortunately, it is very unlikely to
3256 * fail, since we just created it.
3258 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3259 ZRENAMING, NULL), ==, 0);
3267 zfs_rename_unlock(&zl);
3269 zfs_dirent_unlock(sdl);
3270 zfs_dirent_unlock(tdl);
3272 zfs_inode_update(sdzp);
3274 rw_exit(&sdzp->z_name_lock);
3277 zfs_inode_update(tdzp);
3279 zfs_inode_update(szp);
3282 zfs_inode_update(tzp);
3286 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3287 zil_commit(zilog, 0);
3292 EXPORT_SYMBOL(zfs_rename);
3295 * Insert the indicated symbolic reference entry into the directory.
3297 * IN: dip - Directory to contain new symbolic link.
3298 * link - Name for new symlink entry.
3299 * vap - Attributes of new entry.
3300 * target - Target path of new symlink.
3302 * cr - credentials of caller.
3303 * flags - case flags
3305 * RETURN: 0 if success
3306 * error code if failure
3309 * dip - ctime|mtime updated
3313 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3314 struct inode **ipp, cred_t *cr, int flags)
3316 znode_t *zp, *dzp = ITOZ(dip);
3319 zfs_sb_t *zsb = ITOZSB(dip);
3321 uint64_t len = strlen(link);
3324 zfs_acl_ids_t acl_ids;
3325 boolean_t fuid_dirtied;
3326 uint64_t txtype = TX_SYMLINK;
3328 ASSERT(S_ISLNK(vap->va_mode));
3334 if (zsb->z_utf8 && u8_validate(name, strlen(name),
3335 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3339 if (flags & FIGNORECASE)
3342 if (len > MAXPATHLEN) {
3344 return (ENAMETOOLONG);
3347 if ((error = zfs_acl_ids_create(dzp, 0,
3348 vap, cr, NULL, &acl_ids)) != 0) {
3356 * Attempt to lock directory; fail if entry already exists.
3358 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3360 zfs_acl_ids_free(&acl_ids);
3365 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3366 zfs_acl_ids_free(&acl_ids);
3367 zfs_dirent_unlock(dl);
3372 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
3373 zfs_acl_ids_free(&acl_ids);
3374 zfs_dirent_unlock(dl);
3378 tx = dmu_tx_create(zsb->z_os);
3379 fuid_dirtied = zsb->z_fuid_dirty;
3380 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3381 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3382 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3383 ZFS_SA_BASE_ATTR_SIZE + len);
3384 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3385 if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3386 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3387 acl_ids.z_aclp->z_acl_bytes);
3390 zfs_fuid_txhold(zsb, tx);
3391 error = dmu_tx_assign(tx, TXG_NOWAIT);
3393 zfs_dirent_unlock(dl);
3394 if (error == ERESTART) {
3399 zfs_acl_ids_free(&acl_ids);
3406 * Create a new object for the symlink.
3407 * For version 4 ZPL datasets the symlink will be an SA attribute.
3409 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3412 zfs_fuid_sync(zsb, tx);
3414 mutex_enter(&zp->z_lock);
3416 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
3419 zfs_sa_symlink(zp, link, len, tx);
3420 mutex_exit(&zp->z_lock);
3423 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
3424 &zp->z_size, sizeof (zp->z_size), tx);
3426 * Insert the new object into the directory.
3428 (void) zfs_link_create(dl, zp, tx, ZNEW);
3430 if (flags & FIGNORECASE)
3432 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3434 zfs_inode_update(dzp);
3435 zfs_inode_update(zp);
3437 zfs_acl_ids_free(&acl_ids);
3441 zfs_dirent_unlock(dl);
3445 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3446 zil_commit(zilog, 0);
3451 EXPORT_SYMBOL(zfs_symlink);
3454 * Return, in the buffer contained in the provided uio structure,
3455 * the symbolic path referred to by ip.
3457 * IN: ip - inode of symbolic link
3458 * uio - structure to contain the link path.
3459 * cr - credentials of caller.
3461 * RETURN: 0 if success
3462 * error code if failure
3465 * ip - atime updated
3469 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3471 znode_t *zp = ITOZ(ip);
3472 zfs_sb_t *zsb = ITOZSB(ip);
3478 mutex_enter(&zp->z_lock);
3480 error = sa_lookup_uio(zp->z_sa_hdl,
3481 SA_ZPL_SYMLINK(zsb), uio);
3483 error = zfs_sa_readlink(zp, uio);
3484 mutex_exit(&zp->z_lock);
3486 ZFS_ACCESSTIME_STAMP(zsb, zp);
3487 zfs_inode_update(zp);
3491 EXPORT_SYMBOL(zfs_readlink);
3494 * Insert a new entry into directory tdip referencing sip.
3496 * IN: tdip - Directory to contain new entry.
3497 * sip - inode of new entry.
3498 * name - name of new entry.
3499 * cr - credentials of caller.
3501 * RETURN: 0 if success
3502 * error code if failure
3505 * tdip - ctime|mtime updated
3506 * sip - ctime updated
3510 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
3512 znode_t *dzp = ITOZ(tdip);
3514 zfs_sb_t *zsb = ITOZSB(tdip);
3523 ASSERT(S_ISDIR(tdip->i_mode));
3530 * POSIX dictates that we return EPERM here.
3531 * Better choices include ENOTSUP or EISDIR.
3533 if (S_ISDIR(sip->i_mode)) {
3538 if (sip->i_sb != tdip->i_sb) {
3546 /* Prevent links to .zfs/shares files */
3548 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
3549 &parent, sizeof (uint64_t))) != 0) {
3553 if (parent == zsb->z_shares_dir) {
3558 if (zsb->z_utf8 && u8_validate(name,
3559 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3563 #ifdef HAVE_PN_UTILS
3564 if (flags & FIGNORECASE)
3566 #endif /* HAVE_PN_UTILS */
3569 * We do not support links between attributes and non-attributes
3570 * because of the potential security risk of creating links
3571 * into "normal" file space in order to circumvent restrictions
3572 * imposed in attribute space.
3574 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3579 owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
3580 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3585 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3592 * Attempt to lock directory; fail if entry already exists.
3594 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3600 tx = dmu_tx_create(zsb->z_os);
3601 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3602 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3603 zfs_sa_upgrade_txholds(tx, szp);
3604 zfs_sa_upgrade_txholds(tx, dzp);
3605 error = dmu_tx_assign(tx, TXG_NOWAIT);
3607 zfs_dirent_unlock(dl);
3608 if (error == ERESTART) {
3618 error = zfs_link_create(dl, szp, tx, 0);
3621 uint64_t txtype = TX_LINK;
3622 #ifdef HAVE_PN_UTILS
3623 if (flags & FIGNORECASE)
3625 #endif /* HAVE_PN_UTILS */
3626 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
3631 zfs_dirent_unlock(dl);
3633 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3634 zil_commit(zilog, 0);
3636 zfs_inode_update(dzp);
3637 zfs_inode_update(szp);
3641 EXPORT_SYMBOL(zfs_link);
3645 * zfs_null_putapage() is used when the file system has been forcibly
3646 * unmounted. It just drops the pages.
3650 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3651 size_t *lenp, int flags, cred_t *cr)
3653 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
3658 * Push a page out to disk, klustering if possible.
3660 * IN: vp - file to push page to.
3661 * pp - page to push.
3662 * flags - additional flags.
3663 * cr - credentials of caller.
3665 * OUT: offp - start of range pushed.
3666 * lenp - len of range pushed.
3668 * RETURN: 0 if success
3669 * error code if failure
3671 * NOTE: callers must have locked the page to be pushed. On
3672 * exit, the page (and all other pages in the kluster) must be unlocked.
3677 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3678 size_t *lenp, int flags, cred_t *cr)
3680 znode_t *zp = VTOZ(vp);
3681 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3683 u_offset_t off, koff;
3690 * If our blocksize is bigger than the page size, try to kluster
3691 * multiple pages so that we write a full block (thus avoiding
3692 * a read-modify-write).
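 *
 * For instance (illustrative numbers): with a 128K block size and
 * 4K pages, a dirty page at offset 132K yields
 * klen = P2ROUNDUP(128K, 4K) = 128K and
 * koff = P2ALIGN(132K, 128K) = 128K, so (assuming the file extends
 * past 256K) the kluster spans the whole block [128K, 256K) and it
 * is written once rather than being read-modify-written page by
 * page.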
3694 if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
3695 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
3696 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
3697 ASSERT(koff <= zp->z_size);
3698 if (koff + klen > zp->z_size)
3699 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
3700 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
3702 ASSERT3U(btop(len), ==, btopr(len));
3705 * Can't push pages past end-of-file.
3707 if (off >= zp->z_size) {
3708 /* ignore all pages */
3711 } else if (off + len > zp->z_size) {
3712 int npages = btopr(zp->z_size - off);
3715 page_list_break(&pp, &trunc, npages);
3716 /* ignore pages past end of file */
3718 pvn_write_done(trunc, flags);
3719 len = zp->z_size - off;
3722 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
3723 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
3728 tx = dmu_tx_create(zfsvfs->z_os);
3729 dmu_tx_hold_write(tx, zp->z_id, off, len);
3731 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3732 zfs_sa_upgrade_txholds(tx, zp);
3733 err = dmu_tx_assign(tx, TXG_NOWAIT);
3735 if (err == ERESTART) {
3744 if (zp->z_blksz <= PAGESIZE) {
3745 caddr_t va = zfs_map_page(pp, S_READ);
3746 ASSERT3U(len, <=, PAGESIZE);
3747 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
3748 zfs_unmap_page(pp, va);
3750 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
3754 uint64_t mtime[2], ctime[2];
3755 sa_bulk_attr_t bulk[3];
3758 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3760 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3762 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3764 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3766 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
3771 pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
3781 * Copy the portion of the file indicated from pages into the file.
3782 * The pages are stored in a page list attached to the file's vnode.
3784 * IN: vp - vnode of file to push page data to.
3785 * off - position in file to put data.
3786 * len - amount of data to write.
3787 * flags - flags to control the operation.
3788 * cr - credentials of caller.
3789 * ct - caller context.
3791 * RETURN: 0 if success
3792 * error code if failure
3795 * vp - ctime|mtime updated
3799 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr)
3801 znode_t *zp = VTOZ(vp);
3802 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3814 * Align this request to the file block size in case we kluster.
3815 * XXX - this can result in pretty aggressive locking, which can
3816 * impact simultaneous read/write access. One option might be
3817 * to break up long requests (len == 0) into block-by-block
3818 * operations to get narrower locking.
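 *
 * A worked example with illustrative values: off = 6000, len = 3000,
 * blksz = 4096 gives io_off = P2ALIGN(6000, 4096) = 4096 and
 * io_len = P2ROUNDUP(3000 + (6000 - 4096), 4096) = 8192, i.e. the
 * request widens to the two whole blocks covering bytes
 * [6000, 9000).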
3820 blksz = zp->z_blksz;
3822 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
3825 if (len > 0 && ISP2(blksz))
3826 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
3832 * Search the entire vp list for pages >= io_off.
3834 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
3835 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
3838 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
3840 if (off > zp->z_size) {
3841 /* past end of file */
3842 zfs_range_unlock(rl);
3847 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
3849 for (off = io_off; io_off < off + len; io_off += io_len) {
3850 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
3851 pp = page_lookup(vp, io_off,
3852 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
3854 pp = page_lookup_nowait(vp, io_off,
3855 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
3858 if (pp != NULL && pvn_getdirty(pp, flags)) {
3862 * Found a dirty page to push
3864 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
3872 zfs_range_unlock(rl);
3873 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3874 zil_commit(zfsvfs->z_log, zp->z_id);
3878 #endif /* HAVE_MMAP */
3882 zfs_inactive(struct inode *ip)
3884 znode_t *zp = ITOZ(ip);
3885 zfs_sb_t *zsb = ITOZSB(ip);
3888 #ifdef HAVE_SNAPSHOT
3889 /* Early return for snapshot inode? */
3890 #endif /* HAVE_SNAPSHOT */
3892 rw_enter(&zsb->z_teardown_inactive_lock, RW_READER);
3893 if (zp->z_sa_hdl == NULL) {
3894 rw_exit(&zsb->z_teardown_inactive_lock);
3898 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
3899 dmu_tx_t *tx = dmu_tx_create(zsb->z_os);
3901 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3902 zfs_sa_upgrade_txholds(tx, zp);
3903 error = dmu_tx_assign(tx, TXG_WAIT);
3907 mutex_enter(&zp->z_lock);
3908 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb),
3909 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
3910 zp->z_atime_dirty = 0;
3911 mutex_exit(&zp->z_lock);
3917 rw_exit(&zsb->z_teardown_inactive_lock);
3919 EXPORT_SYMBOL(zfs_inactive);
3922 * Bounds-check the seek operation.
3924 * IN: ip - inode seeking within
3925 * ooff - old file offset
3926 * noffp - pointer to new file offset
3927 * ct - caller context
3929 * RETURN: 0 if success
3930 * EINVAL if new offset invalid
3934 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
3936 if (S_ISDIR(ip->i_mode))
3938 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
3940 EXPORT_SYMBOL(zfs_seek);
3944 * Pre-filter the generic locking function to trap attempts to place
3945 * a mandatory lock on a memory mapped file.
3948 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
3949 flk_callback_t *flk_cbp, cred_t *cr)
3951 znode_t *zp = VTOZ(vp);
3952 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3958 * We are following the UFS semantics with respect to mapcnt
3959 * here: If we see that the file is mapped already, then we will
3960 * return an error, but we don't worry about races between this
3961 * function and zfs_map().
3963 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
3968 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
3972 * If we can't find a page in the cache, we will create a new page
3973 * and fill it with file data. For efficiency, we may try to fill
3974 * multiple pages at once (klustering) to fill up the supplied page
3975 * list. Note that the pages to be filled are held with an exclusive
3976 * lock to prevent access by other threads while they are being filled.
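 *
 * In outline (simplified; error paths omitted):
 *
 *	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE)
 *		pp = page_create_va(vp, off, PAGESIZE, ...);
 *	else
 *		pp = pvn_read_kluster(vp, off, ..., plsz, 0);
 *	for each page in the kluster
 *		dmu_read() one page of file data into it;
 *	pvn_plist_init(pp, pl, plsz, off, io_len, rw);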
3979 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
3980 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
3982 znode_t *zp = VTOZ(vp);
3983 page_t *pp, *cur_pp;
3984 objset_t *os = zp->z_zfsvfs->z_os;
3985 u_offset_t io_off, total;
3989 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
3991 * We only have a single page, don't bother klustering
3995 pp = page_create_va(vp, io_off, io_len,
3996 PG_EXCL | PG_WAIT, seg, addr);
3999 * Try to find enough pages to fill the page list
4001 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4002 &io_len, off, plsz, 0);
4006 * The page already exists, nothing to do here.
4013 * Fill the pages in the kluster.
4016 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4019 ASSERT3U(io_off, ==, cur_pp->p_offset);
4020 va = zfs_map_page(cur_pp, S_WRITE);
4021 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4023 zfs_unmap_page(cur_pp, va);
4025 /* On error, toss the entire kluster */
4026 pvn_read_done(pp, B_ERROR);
4027 /* convert checksum errors into IO errors */
4032 cur_pp = cur_pp->p_next;
4036 * Fill in the page list array from the kluster starting
4037 * from the desired offset `off'.
4038 * NOTE: the page list will always be null terminated.
4040 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4041 ASSERT(pl == NULL || (*pl)->p_offset == off);
4047 * Return pointers to the pages for the file region [off, off + len]
4048 * in the pl array. If plsz is greater than len, this function may
4049 * also return page pointers from after the specified region
4050 * (i.e. the region [off, off + plsz]). These additional pages are
4051 * only returned if they are already in the cache, or were created as
4052 * part of a klustered read.
4054 * IN: vp - vnode of file to get data from.
4055 * off - position in file to get data from.
4056 * len - amount of data to retrieve.
4057 * plsz - length of provided page list.
4058 * seg - segment to obtain pages for.
4059 * addr - virtual address of fault.
4060 * rw - mode of created pages.
4061 * cr - credentials of caller.
4062 * ct - caller context.
4064 * OUT: protp - protection mode of created pages.
4065 * pl - list of pages created.
4067 * RETURN: 0 if success
4068 * error code if failure
4071 * vp - atime updated
4075 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4076 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4077 enum seg_rw rw, cred_t *cr)
4079 znode_t *zp = VTOZ(vp);
4080 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4084 /* we do our own caching, faultahead is unnecessary */
4087 else if (len > plsz)
4090 len = P2ROUNDUP(len, PAGESIZE);
4091 ASSERT(plsz >= len);
4100 * Loop through the requested range [off, off + len) looking
4101 * for pages. If we don't find a page, we will need to create
4102 * a new page and fill it with data from the file.
4105 if (*pl = page_lookup(vp, off, SE_SHARED))
4107 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4110 ASSERT3U((*pl)->p_offset, ==, off);
4114 ASSERT3U(len, >=, PAGESIZE);
4117 ASSERT3U(plsz, >=, PAGESIZE);
4124 * Fill out the page array with any pages already in the cache.
4127 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4134 * Release any pages we have previously locked.
4139 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4149 * Request a memory map for a section of a file. This code interacts
4150 * with common code and the VM system as follows:
4152 * common code calls mmap(), which ends up in smmap_common()
4154 * this calls VOP_MAP(), which takes you into (say) zfs
4156 * zfs_map() calls as_map(), passing segvn_create() as the callback
4158 * segvn_create() creates the new segment and calls VOP_ADDMAP()
4160 * zfs_addmap() updates z_mapcnt
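 *
 * The net effect is that z_mapcnt counts the pages currently
 * mapped; zfs_frlock() consults it to refuse mandatory locks on
 * mapped files, and zfs_delmap() decrements it symmetrically when
 * the mapping is torn down.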
4164 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4165 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
4167 znode_t *zp = VTOZ(vp);
4168 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4169 segvn_crargs_t vn_a;
4175 if ((prot & PROT_WRITE) && (zp->z_pflags &
4176 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4181 if ((prot & (PROT_READ | PROT_EXEC)) &&
4182 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4187 if (vp->v_flag & VNOMAP) {
4192 if (off < 0 || len > MAXOFFSET_T - off) {
4197 if (vp->v_type != VREG) {
4203 * If file is locked, disallow mapping.
4205 if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4211 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4219 vn_a.offset = (u_offset_t)off;
4220 vn_a.type = flags & MAP_TYPE;
4222 vn_a.maxprot = maxprot;
4225 vn_a.flags = flags & ~MAP_TYPE;
4227 vn_a.lgrp_mem_policy_flags = 0;
4229 error = as_map(as, *addrp, len, segvn_create, &vn_a);
4238 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4239 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
4241 uint64_t pages = btopr(len);
4243 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4248 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4249 * more accurate mtime for the associated file. Since we don't have a way of
4250 * detecting when the data was actually modified, we have to resort to
4251 * heuristics. If an explicit msync() is done, then we mark the mtime when the
4252 * last page is pushed. The problem occurs when the msync() call is omitted,
4253 * which is by far the most common case:
 *	mmap()
 *	<modify memory>
 *	munmap()
 *	<time lapse>
4261 *	putpage() via fsflush
4263 * If we wait until fsflush to come along, we can have a modification time that
4264 * is some arbitrary point in the future. In order to prevent this in the
4265 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is removed.
4270 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4271 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
4273 uint64_t pages = btopr(len);
4275 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4276 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4278 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4279 vn_has_cached_data(vp))
4280 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4284 #endif /* HAVE_MMAP */
4287 * convoff - converts the given data (start, whence) to the given whence.
4291 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4296 if ((lckdat->l_whence == 2) || (whence == 2)) {
4297 if ((error = zfs_getattr(ip, &vap, 0, CRED())) != 0)
4301 switch (lckdat->l_whence) {
4303 lckdat->l_start += offset;
4306 lckdat->l_start += vap.va_size;
4314 if (lckdat->l_start < 0)
4319 lckdat->l_start -= offset;
4322 lckdat->l_start -= vap.va_size;
4330 lckdat->l_whence = (short)whence;
4335 * Free or allocate space in a file. Currently, this function only
4336 * supports the `F_FREESP' command. However, this command is somewhat
4337 * misnamed, as its functionality includes the ability to allocate as
4338 * well as free space.
4340 * IN: ip - inode of file to free data in.
4341 * cmd - action to take (only F_FREESP supported).
4342 * bfp - section of file to free/alloc.
4343 * flag - current file open mode flags.
4344 * offset - current file offset.
4345 * cr - credentials of caller [UNUSED].
4347 * RETURN: 0 if success
4348 * error code if failure
4351 * ip - ctime|mtime updated
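 *
 * For example, truncating a file from offset 4096 to end-of-file
 * would look like this (values illustrative):
 *
 *	flock64_t bf;
 *
 *	bf.l_whence = 0;	(SEEK_SET)
 *	bf.l_start = 4096;
 *	bf.l_len = 0;		(0 means to end of file)
 *	error = zfs_space(ip, F_FREESP, &bf, FWRITE, 0, cr);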
4355 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4356 offset_t offset, cred_t *cr)
4358 znode_t *zp = ITOZ(ip);
4359 zfs_sb_t *zsb = ITOZSB(ip);
4366 if (cmd != F_FREESP) {
4371 if ((error = convoff(ip, bfp, 0, offset))) {
4376 if (bfp->l_len < 0) {
4382 len = bfp->l_len; /* 0 means from off to end of file */
4384 error = zfs_freesp(zp, off, len, flag, TRUE);
4389 EXPORT_SYMBOL(zfs_space);
4393 zfs_fid(struct inode *ip, fid_t *fidp)
4395 znode_t *zp = ITOZ(ip);
4396 zfs_sb_t *zsb = ITOZSB(ip);
4399 uint64_t object = zp->z_id;
4406 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
4407 &gen64, sizeof (uint64_t))) != 0) {
4412 gen = (uint32_t)gen64;
4414 size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
4415 if (fidp->fid_len < size) {
4416 fidp->fid_len = size;
4421 zfid = (zfid_short_t *)fidp;
4423 zfid->zf_len = size;
4425 for (i = 0; i < sizeof (zfid->zf_object); i++)
4426 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4428 /* Must have a non-zero generation number to distinguish from .zfs */
4431 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4432 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4434 if (size == LONG_FID_LEN) {
4435 uint64_t objsetid = dmu_objset_id(zsb->z_os);
4438 zlfid = (zfid_long_t *)fidp;
4440 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4441 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4443 /* XXX - this should be the generation number for the objset */
4444 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4445 zlfid->zf_setgen[i] = 0;
4451 EXPORT_SYMBOL(zfs_fid);
4455 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4457 znode_t *zp = ITOZ(ip);
4458 zfs_sb_t *zsb = ITOZSB(ip);
4460 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4464 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4469 EXPORT_SYMBOL(zfs_getsecattr);
4473 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4475 znode_t *zp = ITOZ(ip);
4476 zfs_sb_t *zsb = ITOZSB(ip);
4478 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4479 zilog_t *zilog = zsb->z_log;
4484 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4486 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
4487 zil_commit(zilog, 0);
4492 EXPORT_SYMBOL(zfs_setsecattr);
4494 #ifdef HAVE_UIO_ZEROCOPY
4496 * Tunables, both must be a power of 2.
4498 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4499 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4500 * an arcbuf for a partial block read
4502 int zcr_blksz_min = (1 << 10); /* 1K */
4503 int zcr_blksz_max = (1 << 17); /* 128K */
4507 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4509 znode_t *zp = ITOZ(ip);
4510 zfs_sb_t *zsb = ITOZSB(ip);
4511 int max_blksz = zsb->z_max_blksz;
4512 uio_t *uio = &xuio->xu_uio;
4513 ssize_t size = uio->uio_resid;
4514 offset_t offset = uio->uio_loffset;
4519 int preamble, postamble;
4521 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4529 * Loan out an arc_buf for write if write size is bigger than
4530 * max_blksz, and the file's block size is also max_blksz.
4533 if (size < blksz || zp->z_blksz != blksz) {
4538 * Caller requests buffers for write before knowing where the
4539 * write offset might be (e.g. NFS TCP write).
4544 preamble = P2PHASE(offset, blksz);
4546 preamble = blksz - preamble;
4551 postamble = P2PHASE(size, blksz);
4554 fullblk = size / blksz;
4555 (void) dmu_xuio_init(xuio,
4556 (preamble != 0) + fullblk + (postamble != 0));
4559 * Have to fix iov base/len for partial buffers. They
4560 * currently represent full arc_buf's.
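 *
 * A worked example with illustrative numbers: offset = 1000,
 * size = 300000, blksz = 131072 (128K). Then
 * preamble = 131072 - P2PHASE(1000, 131072) = 130072, leaving
 * size = 169928; postamble = P2PHASE(169928, 131072) = 38856,
 * leaving exactly one full block; dmu_xuio_init() is therefore
 * sized for 1 + 1 + 1 = 3 arc_bufs.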
4563 /* data begins in the middle of the arc_buf */
4564 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4567 (void) dmu_xuio_add(xuio, abuf,
4568 blksz - preamble, preamble);
4571 for (i = 0; i < fullblk; i++) {
4572 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4575 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4579 /* data ends in the middle of the arc_buf */
4580 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4583 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4588 * Loan out an arc_buf for read if the read size is larger than
4589 * the current file block size. Block alignment is not
4590 * considered. Partial arc_buf will be loaned out for read.
4592 blksz = zp->z_blksz;
4593 if (blksz < zcr_blksz_min)
4594 blksz = zcr_blksz_min;
4595 if (blksz > zcr_blksz_max)
4596 blksz = zcr_blksz_max;
4597 /* avoid potential complexity of dealing with it */
4598 if (blksz > max_blksz) {
4603 maxsize = zp->z_size - uio->uio_loffset;
4617 uio->uio_extflg = UIO_XUIO;
4618 XUIO_XUZC_RW(xuio) = ioflag;
4625 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4629 int ioflag = XUIO_XUZC_RW(xuio);
4631 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4633 i = dmu_xuio_cnt(xuio);
4635 abuf = dmu_xuio_arcbuf(xuio, i);
4637 * if abuf == NULL, it must be a write buffer
4638 * that has been returned in zfs_write().
4641 dmu_return_arcbuf(abuf);
4642 ASSERT(abuf || ioflag == UIO_WRITE);
4645 dmu_xuio_fini(xuio);
4648 #endif /* HAVE_UIO_ZEROCOPY */