 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include "fs/fs_subr.h"
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
/*
 * Each vnode op performs some logical unit of work. To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory. The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zsb).
 *	A ZFS_EXIT(zsb) is needed before all returns. Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2) iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory. Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes. Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx then use iput_ASYNC().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system. The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *	before dropping locks. This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate that the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zsb);			// exit if unmounted
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zsb);		// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zsb);			// finished in zfs
 *	return (error);			// done, report error
 */
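
/*
 * Illustrative sketch (not part of the original source): a minimal vnode
 * op wired up exactly per the Big Rules above, showing the TXG_NOWAIT
 * retry loop around dmu_tx_assign().  do_real_work() is the same
 * hypothetical callee named in the comment; this is a sketch, not the
 * file's actual code.
 */
#if 0
static int
zfs_example_op(struct inode *ip, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	dmu_tx_t *tx;
	int error;

	ZFS_ENTER(zsb);			/* exit if unmounted */
	ZFS_VERIFY_ZP(zp);		/* znode still valid? */
top:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	error = dmu_tx_assign(tx, TXG_NOWAIT);	/* never TXG_WAIT (rule 4) */
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);	/* all locks already dropped */
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);			/* really out of space */
	}
	error = do_real_work(zp, tx);		/* hypothetical */
	dmu_tx_commit(tx);			/* commit, error or not (rule 6) */
	ZFS_EXIT(zsb);
	return (error);
}
#endif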
/*
 * Virus scanning is unsupported. It would be possible to add a hook
 * here to perform the required virus scan. This could be done
 * entirely in the kernel or potentially as an update to invoke a
zfs_vscan(struct inode *ip, cred_t *cr, int async)

zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)

	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {

	/* Keep a count of the synchronous opens in the znode */

	atomic_inc_32(&zp->z_sync_cnt);

EXPORT_SYMBOL(zfs_open);
zfs_close(struct inode *ip, int flag, cred_t *cr)

	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	/*
	 * Zero the synchronous opens in the znode. Under Linux the
	 * zfs_close() hook is not symmetric with zfs_open(), it is
	 * only called once when the last reference is dropped.
	 */

	if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

EXPORT_SYMBOL(zfs_close);
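
/*
 * Illustrative sketch (not part of the original source): the asymmetric
 * open/close accounting described in the comment above.  The exact
 * O_SYNC guards are elided in this listing; only the counter handling
 * is shown.
 */
#if 0
	/* In zfs_open(), once per synchronous open: */
	atomic_inc_32(&zp->z_sync_cnt);

	/* In zfs_close(), called only once, at last release: */
	zp->z_sync_cnt = 0;
#endif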
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)

	struct address_space *mp = ip->i_mapping;

	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		nbytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);

		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		(void) dmu_read(os, oid, start+off, nbytes, pb+off,

		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		mark_page_accessed(pp);

		page_cache_release(pp);
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		otherwise we default to the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
mappedread(struct inode *ip, int nbytes, uio_t *uio)

	struct address_space *mp = ip->i_mapping;

	znode_t *zp = ITOZ(ip);
	objset_t *os = ITOZSB(ip)->z_os;

	start = uio->uio_loffset;
	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		bytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);

		ASSERT(PageUptodate(pp));

		error = uiomove(pb + off, bytes, UIO_READ, uio);

		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		mark_page_accessed(pp);

		page_cache_release(pp);

		error = dmu_read_uio(os, zp->z_id, uio, bytes);
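
/*
 * Illustrative sketch (not part of the original source): the page-aligned
 * walk shared by update_pages() and mappedread() above.  For example,
 * with PAGE_CACHE_SIZE == 4096, start == 5000 and len == 6000, the loop
 * visits pages 1 and 2 with per-page byte counts of 3192 (4096 - 904)
 * and 2808.
 */
#if 0
static void
example_page_walk(int64_t start, int len)
{
	int64_t off = start & (PAGE_CACHE_SIZE - 1);	/* intra-page offset */
	int nbytes;

	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		nbytes = MIN(PAGE_CACHE_SIZE - off, len);
		/* operate on page (start >> PAGE_CACHE_SHIFT) for nbytes */
		len -= nbytes;
		off = 0;	/* only the first page can be misaligned */
	}
}
#endif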
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	inode - atime updated if byte count > 0
 */
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)

	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);

#ifdef HAVE_UIO_ZEROCOPY

#endif /* HAVE_UIO_ZEROCOPY */

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {

#ifdef HAVE_MANDLOCKS
	/*
	 * Check for mandatory locks
	 */
	if (MANDMODE(zp->z_mode)) {
		if (error = chklock(ip, FREAD,
		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {

#endif /* HAVE_MANDLOCKS */

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zsb->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;

		nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,

		ASSERT(offset + n <= blksz);

		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),

#endif /* HAVE_UIO_ZEROCOPY */

		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT))
			error = mappedread(ip, nbytes, uio);

			error = dmu_read_uio(os, zp->z_id, uio, nbytes);

		/* convert checksum errors into IO errors */

	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_read);
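
/*
 * Illustrative sketch (not part of the original source): how the read
 * loop above sizes each chunk so that no single transfer crosses a
 * zfs_read_chunk_size boundary.  With the default 1MB chunk size, a
 * 3MB read starting at offset 0x90000 issues chunks of 0x70000,
 * 0x100000, 0x100000 and 0x90000 bytes.
 */
#if 0
static ssize_t
example_chunk_size(uint64_t loffset, ssize_t n)
{
	/* bytes remaining before the next chunk boundary, capped at n */
	return (MIN(n, zfs_read_chunk_size -
	    P2PHASE(loffset, zfs_read_chunk_size)));
}
#endif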
/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - ctime|mtime updated if byte count > 0
 */
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)

	znode_t		*zp = ITOZ(ip);
	rlim64_t	limit = uio->uio_limit;
	ssize_t		start_resid = uio->uio_resid;

	zfs_sb_t	*zsb = ZTOZSB(zp);

	int		max_blksz = zsb->z_max_blksz;

	iovec_t		*aiov = NULL;

	iovec_t		*iovp = uio->uio_iov;

	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];
	ASSERTV(int	iovcnt = uio->uio_iovcnt);
	/*
	 * Fasttrack empty write
	 */

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;

#ifdef HAVE_MANDLOCKS
	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (MANDMODE((mode_t)zp->z_mode) &&
	    (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {

#endif /* HAVE_MANDLOCKS */

#ifdef HAVE_UIO_ZEROCOPY
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 *
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;

		uio_prefaultpages(MIN(n, max_blksz), uio);
#endif /* HAVE_UIO_ZEROCOPY */

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics. We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);

		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */

		uio->uio_loffset = woff;

		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);

		zfs_range_unlock(rl);

	if ((woff + n) > limit || woff > (limit - n))

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);
	/*
	 * Write the file in reasonable size chunks. Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */

		woff = uio->uio_loffset;

		if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
		    zfs_owner_overquota(zsb, zp, B_TRUE)) {

				dmu_return_arcbuf(abuf);

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);

			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));

		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block. "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction. This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),

			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);

			ASSERT(cbytes == max_blksz);

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zsb->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);

			if (error == ERESTART) {

				dmu_return_arcbuf(abuf);

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range. This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);

				new_blksz = MIN(end_size, max_blksz);

			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),

			tx_bytes -= uio->uio_resid;

			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf(). Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {

				dmu_write(zsb->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();

				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),

			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);

		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT))
			update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id);

		/*
		 * If we made no progress, we're done. If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */

			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
			    (void *)&newmode, sizeof (uint64_t), tx);

		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,

		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zsb->z_replay && zsb->z_replay_eof != 0)
			zp->z_size = zsb->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);

		ASSERT(tx_bytes == nbytes);

		uio_prefaultpages(MIN(n, max_blksz), uio);

	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zsb->z_replay || uio->uio_resid == start_resid) {

	if (ioflag & (FSYNC | FDSYNC) ||
	    zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_write);
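
/*
 * Illustrative sketch (not part of the original source): the lock-free
 * z_size update used near the end of zfs_write().  Each writer only
 * ever grows the size, so a compare-and-swap loop is sufficient to
 * resolve races between concurrent extenders.
 */
#if 0
static void
example_grow_size(znode_t *zp, uint64_t new_eof)
{
	uint64_t size;

	/* retry until our value sticks or someone else grew past it */
	while ((size = zp->z_size) < new_eof)
		(void) atomic_cas_64(&zp->z_size, size, new_eof);
}
#endif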
iput_async(struct inode *ip, taskq_t *taskq)

	ASSERT(atomic_read(&ip->i_count) > 0);
	if (atomic_read(&ip->i_count) == 1)
		taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_SLEEP);

zfs_get_done(zgd_t *zgd, int error)

	znode_t *zp = zgd->zgd_private;
	objset_t *os = ZTOZSB(zp)->z_os;

	dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
static int zil_fault_io = 0;

/*
 * Get data to generate a TX_WRITE intent log record.
 */
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)

	objset_t *os = zsb->z_os;

	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zsb, object, &zp) != 0)

	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zsb->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {

			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);

		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */

			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;

			zgd->zgd_rl = zfs_range_lock(zp, offset, size,

			if (zp->z_blksz == size)

			zfs_range_unlock(zgd->zgd_rl);

		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)

		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,

			ASSERT(error || lr->lr_length <= zp->z_blksz);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf. We will finish everything up
			 * in the zfs_get_done() callback.
			 */

		if (error == EALREADY) {
			lr->lr_common.lrc_txtype = TX_WRITE2;

	zfs_get_done(zgd, error);
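
/*
 * Illustrative sketch (not part of the original source): the
 * lock-then-recheck dance used for indirect writes above.  The block
 * must be locked at its current size before dmu_sync() checksums it,
 * but the size can change before the lock is held, so the blocksize is
 * re-read under the lock and the lock retried on mismatch.
 */
#if 0
static void
example_lock_block(znode_t *zp, zgd_t *zgd, uint64_t *offp)
{
	for (;;) {
		uint64_t size = zp->z_blksz;
		uint64_t blkoff = ISP2(size) ? P2PHASE(*offp, size) : *offp;

		*offp -= blkoff;
		zgd->zgd_rl = zfs_range_lock(zp, *offp, size, RL_READER);
		if (zp->z_blksz == size)
			break;			/* size still valid */
		*offp += blkoff;		/* size changed; undo, retry */
		zfs_range_unlock(zgd->zgd_rl);
	}
}
#endif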
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)

	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

EXPORT_SYMBOL(zfs_access);
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp	- returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 if success
 *		error code if failure
 */
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)

	znode_t *zdp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);

	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(dip->i_mode)) {

		} else if (zdp->z_sa_hdl == NULL) {

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);

		vnode_t *tvp = dnlc_lookup(dvp, nm);

			error = zfs_fastaccesschk_execute(zdp, cr);

			if (tvp == DNLC_NO_VNODE) {

				return (specvp_check(vpp, cr));

#endif /* HAVE_DNLC */

	if (flags & LOOKUP_XATTR) {
		/*
		 * If the xattr property is off, refuse the lookup request.
		 */
		if (!(zsb->z_flags & ZSB_XATTR_USER)) {

		/*
		 * We don't allow recursive attributes.
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,

	if (!S_ISDIR(dip->i_mode)) {

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {

	if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

EXPORT_SYMBOL(zfs_lookup);
/*
 * Attempt to create a new entry in a directory. If the entry
 * already exists, truncate the file if permissible, else return
 * an error. Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	dip - ctime|mtime updated if new entry created
 *	ip - ctime|mtime always, atime if new
 */
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)

	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_sb_t	*zsb = ITOZSB(dip);

	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))

	if (zsb->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {

	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */

	/* possible igrab(zp) */

	if (flag & FIGNORECASE)

	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

		zfs_acl_ids_free(&acl_ids);
		if (strcmp(name, "..") == 0)

		/*
		 * Create a new file object and update the directory
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {

			zfs_acl_ids_free(&acl_ids);

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */

		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {

			zfs_acl_ids_free(&acl_ids);

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)

		if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zsb->z_fuid_dirty;

			zfs_fuid_txhold(zsb, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zsb->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);

		error = dmu_tx_assign(tx, TXG_NOWAIT);

			zfs_dirent_unlock(dl);
			if (error == ERESTART) {

			zfs_acl_ids_free(&acl_ids);

		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

			zfs_fuid_sync(zsb, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)

		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);

		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

			zfs_acl_ids_free(&acl_ids);

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */

		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {

		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {

		mutex_enter(&dzp->z_lock);

		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);

			error = zfs_freesp(zp, 0, 0, mode, TRUE);

	zfs_dirent_unlock(dl);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

EXPORT_SYMBOL(zfs_create);
/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;

zfs_remove(struct inode *dip, char *name, cred_t *cr)

	znode_t		*zp, *dzp = ITOZ(dip);

	zfs_sb_t	*zsb = ITOZSB(dip);

	uint64_t	xattr_obj_unlinked = 0;

	pathname_t	*realnmp = NULL;
#ifdef HAVE_PN_UTILS

#endif /* HAVE_PN_UTILS */

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE) {

#endif /* HAVE_PN_UTILS */

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
#ifdef HAVE_PN_UTILS

#endif /* HAVE_PN_UTILS */

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {

		dnlc_remove(dvp, realnmp->pn_buf);

		dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

	/*
	 * We never delete the znode and always place it in the unlinked
	 * set. The dentry cache will always hold the last reference and
	 * is responsible for safely freeing the znode.
	 */

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zsb, xattr_obj, &xzp);
		ASSERT3U(error, ==, 0);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);

	error = dmu_tx_assign(tx, TXG_NOWAIT);

		zfs_dirent_unlock(dl);

		if (error == ERESTART) {

#ifdef HAVE_PN_UTILS

#endif /* HAVE_PN_UTILS */

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed. Could have been deleted due to
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE)
#endif /* HAVE_PN_UTILS */
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

#ifdef HAVE_PN_UTILS

#endif /* HAVE_PN_UTILS */

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);

		zfs_inode_update(xzp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

EXPORT_SYMBOL(zfs_remove);
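
/*
 * Illustrative sketch (not part of the original source): the
 * unlinked-set handoff described above.  zfs_remove() never frees the
 * znode itself; it only moves the object to the unlinked set, and the
 * final iput()/zfs_zinactive() performs the real delete.
 */
#if 0
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
	if (error == 0 && unlinked)
		zfs_unlinked_add(zp, tx);	/* defer the actual delete */
#endif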
/*
 * Create a new directory and insert it into dip using the name
 * provided. Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)

	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_sb_t	*zsb = ITOZSB(dip);

	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))

	if (dzp->z_pflags & ZFS_XATTR) {

	if (zsb->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

	if (flags & FIGNORECASE)

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {

	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 */

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,

		zfs_acl_ids_free(&acl_ids);

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;

		zfs_fuid_txhold(zsb, tx);
	if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);

		zfs_dirent_unlock(dl);
		if (error == ERESTART) {

		zfs_acl_ids_free(&acl_ids);

	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		zfs_fuid_sync(zsb, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)

	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_mkdir);
/*
 * Remove a directory subdir entry. If the current working
 * directory is the same as the subdir to be removed, the
 *
 *	IN:	dip	- inode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	dip - ctime|mtime updated
 */
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,

	znode_t		*dzp = ITOZ(dip);

	zfs_sb_t	*zsb = ITOZSB(dip);

	if (flags & FIGNORECASE)

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {

	if (!S_ISDIR(ip->i_mode)) {

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);

		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);

		if (error == ERESTART) {

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)

		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_rmdir);
/*
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *	IN:	ip	- inode of directory to read.
 *		dirent	- buffer for directory entries.
 *
 *	OUT:	dirent	- filler buffer of directory entries.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
    loff_t *pos, cred_t *cr)

	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);

	zap_attribute_t	zap;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent))) != 0)

	/*
	 * Quit if directory has been removed (posix)
	 */

	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */

		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);

		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, *pos);

	/*
	 * Transform to file-system independent format
	 */

		/*
		 * Special case `.', `..', and `.zfs'.
		 */

			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;

		} else if (*pos == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;

		} else if (*pos == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;

			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)

			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers != 1) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)*pos);

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);

		done = filldir(dirent, zap.za_name, strlen(zap.za_name),
		    zap_cursor_serialize(&zc), objnum, 0);

		/* Prefetch znode */

			dmu_prefetch(os, objnum, 0, 0);

		zap_cursor_advance(&zc);
		*pos = zap_cursor_serialize(&zc);

	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

	zap_cursor_fini(&zc);
	if (error == ENOENT)

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_readdir);
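
/*
 * Illustrative sketch (not part of the original source): the cookie
 * layout described above.  ZAP cursors always serialize to values with
 * the low 4 bits clear, so offsets 0, 1, and 2 can never collide with a
 * real entry and are free to encode '.', '..', and '.zfs'.
 */
#if 0
static boolean_t
example_special_offset(loff_t pos)
{
	return (pos == 0 ||	/* '.'	*/
	    pos == 1 ||		/* '..'	*/
	    pos == 2);		/* '.zfs', root directory only */
}
#endif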
ulong_t zfs_fsync_sync_cnt = 4;

zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)

	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {

		zil_commit(zsb->z_log, zp->z_id);

EXPORT_SYMBOL(zfs_fsync);
/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	ip	- inode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If ATTR_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds)
 */
zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)

	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);

	uint64_t	mtime[2], ctime[2];
	xvattr_t	*xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
	xoptattr_t	*xoap = NULL;
	boolean_t	skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t	bulk[2];

	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {

	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (vap->va_uid != crgetuid(cr))) {
		if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,

	/*
	 * Return all attributes. It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */

	mutex_enter(&zp->z_lock);
	vap->va_type = vn_mode_to_vtype(zp->z_mode);
	vap->va_mode = zp->z_mode;
	vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
	vap->va_nodeid = zp->z_id;
	if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
		links = zp->z_links + 1;
	else
		links = zp->z_links;
	vap->va_nlink = MIN(links, ZFS_LINK_MAX);
	vap->va_size = i_size_read(ip);
	vap->va_rdev = ip->i_rdev;
	vap->va_seq = ip->i_generation;

	/*
	 * Add in any requested optional attributes and the create time.
	 * Also set the corresponding bits in the returned attribute bitmap.
	 */
	if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) {
		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
			xoap->xoa_archive =
			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
			XVA_SET_RTN(xvap, XAT_ARCHIVE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
			xoap->xoa_readonly =
			    ((zp->z_pflags & ZFS_READONLY) != 0);
			XVA_SET_RTN(xvap, XAT_READONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
			xoap->xoa_system =
			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
			XVA_SET_RTN(xvap, XAT_SYSTEM);
		}

		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
			xoap->xoa_hidden =
			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
			XVA_SET_RTN(xvap, XAT_HIDDEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			xoap->xoa_nounlink =
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
			XVA_SET_RTN(xvap, XAT_NOUNLINK);
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			xoap->xoa_immutable =
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			xoap->xoa_appendonly =
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
			XVA_SET_RTN(xvap, XAT_APPENDONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			xoap->xoa_nodump =
			    ((zp->z_pflags & ZFS_NODUMP) != 0);
			XVA_SET_RTN(xvap, XAT_NODUMP);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
			xoap->xoa_opaque =
			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
			XVA_SET_RTN(xvap, XAT_OPAQUE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			xoap->xoa_av_quarantined =
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			xoap->xoa_av_modified =
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
		    S_ISREG(ip->i_mode)) {
			zfs_sa_get_scanstamp(zp, xvap);
		}

		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {

			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb),
			    times, sizeof (times));
			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
			XVA_SET_RTN(xvap, XAT_CREATETIME);
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_REPARSE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
			xoap->xoa_generation = zp->z_gen;
			XVA_SET_RTN(xvap, XAT_GEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
			xoap->xoa_offline =
			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
			XVA_SET_RTN(xvap, XAT_OFFLINE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
			xoap->xoa_sparse =
			    ((zp->z_pflags & ZFS_SPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_SPARSE);
		}
	}

	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
	ZFS_TIME_DECODE(&vap->va_ctime, ctime);

	mutex_exit(&zp->z_lock);

	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);

	if (zp->z_blksz == 0) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		vap->va_blksize = zsb->z_max_blksz;

EXPORT_SYMBOL(zfs_getattr);
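
/*
 * Illustrative sketch (not part of the original source): how a caller
 * asks zfs_getattr() for optional attributes.  Requested bits are set
 * with XVA_SET_REQ(); on return, only the bits echoed via XVA_SET_RTN()
 * (tested with XVA_ISSET_RTN()) carry valid data.  A sketch under
 * the assumption that the caller embeds the vattr in an xvattr_t, as
 * the "vap may be an xvattr_t *" comment above implies.
 */
#if 0
static int
example_query_immutable(struct inode *ip, cred_t *cr, boolean_t *immutable)
{
	xvattr_t xva;
	int error;

	xva_init(&xva);				/* sets ATTR_XVATTR in va_mask */
	XVA_SET_REQ(&xva, XAT_IMMUTABLE);	/* request one optional attr */

	error = zfs_getattr(ip, &xva.xva_vattr, 0, cr);
	if (error == 0 && XVA_ISSET_RTN(&xva, XAT_IMMUTABLE))
		*immutable = xva.xva_xoptattrs.xoa_immutable;
	return (error);
}
#endif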
/*
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 *	IN:	ip	- inode of file to be modified.
 *		vap	- new attribute values.
 *			  If ATTR_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - ctime updated, mtime updated if size changed.
 */
zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)

	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);

	xvattr_t	*tmpxvattr;
	uint_t		mask = vap->va_mask;

	uint64_t	new_uid, new_gid;

	uint64_t	mtime[2], ctime[2];

	int		need_policy = FALSE;

	zfs_fuid_info_t	*fuidp = NULL;
	xvattr_t	*xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */

	boolean_t	skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t	fuid_dirtied = B_FALSE;
	sa_bulk_attr_t	*bulk, *xattr_bulk;
	int		count = 0, xattr_count = 0;
	/*
	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	 */

	if (zsb->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & ATTR_XVATTR))) {

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {

	/*
	 * If this is an xvattr_t, then get a pointer to the structure of
	 * optional attributes. If this is NULL, then we have a vattr_t.
	 */
	xoap = xva_getxoptattr(xvap);

	tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
	xva_init(tmpxvattr);

	bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
	xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);

	/*
	 * Immutable files can only alter immutable bit and atime
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
	    ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
	    ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {

	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {

	/*
	 * Verify that the timestamps don't overflow 32 bits.
	 * ZFS can handle large timestamps, but 32bit syscalls can't
	 * handle times greater than 2039. This check should be removed
	 * once large timestamps are fully supported.
	 */
	if (mask & (ATTR_ATIME | ATTR_MTIME)) {
		if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {

	/* Can this be moved to before the top label? */
	if (zsb->z_vfs->mnt_flags & MNT_READONLY) {

	/*
	 * First validate permissions
	 */
	if (mask & ATTR_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);

		truncate_setsize(ip, vap->va_size);

		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);

	if (mask & (ATTR_ATIME|ATTR_MTIME) ||
	    ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,

	if (mask & (ATTR_UID|ATTR_GID)) {
		int	idmask = (mask & (ATTR_UID|ATTR_GID));

		/*
		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		 */

		if (!(mask & ATTR_MODE))
			vap->va_mode = zp->z_mode;

		/*
		 * Take ownership or chgrp to group we are a member of
		 */

		take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & ATTR_GID) &&
		    zfs_groupmember(zsb, vap->va_gid, cr);

		/*
		 * If both ATTR_UID and ATTR_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 * ownership.
		 *
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		 */

		if (((idmask == (ATTR_UID|ATTR_GID)) &&
		    take_owner && take_group) ||
		    ((idmask == ATTR_UID) && take_owner) ||
		    ((idmask == ATTR_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				/*
				 * Remove setuid/setgid for non-privileged users
				 */
				(void) secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (ATTR_UID|ATTR_GID));

	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & ATTR_XVATTR) {
		/*
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 *
		 * the bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		 */
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {

				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {

				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {

				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {

				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(tmpxvattr, XAT_NODUMP);

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {

				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((!S_ISREG(ip->i_mode) &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {

				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {

	mutex_exit(&zp->z_lock);

	if (mask & ATTR_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(ip, vap,

			trim_mask |= ATTR_MODE;

		/*
		 * If trim_mask is set then take ownership
		 * has been granted or write_acl is present and user
		 * has the ability to modify mode. In that case remove
		 * UID|GID and or MODE from mask so that
		 * secpolicy_vnode_setattr() doesn't revoke it.
		 */

			saved_mask = vap->va_mask;
			vap->va_mask &= ~trim_mask;

		err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);

			vap->va_mask |= saved_mask;
	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 */
	mask = vap->va_mask;

	if ((mask & (ATTR_UID | ATTR_GID))) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);

		if (mask & ATTR_UID) {
			new_uid = zfs_fuid_create(zsb,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_uid != zp->z_uid &&
			    zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {

		if (mask & ATTR_GID) {
			new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
			    cr, ZFS_GROUP, &fuidp);
			if (new_gid != zp->z_gid &&
			    zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {

	tx = dmu_tx_create(zsb->z_os);

	if (mask & ATTR_MODE) {
		uint64_t pmode = zp->z_mode;

		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		zfs_acl_chmod_setattr(zp, &aclp, new_mode);

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 */
			if (zsb->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,

				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);

				dmu_tx_hold_write(tx, acl_obj, 0,

		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);

		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

		if ((mask & ATTR_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);

	fuid_dirtied = zsb->z_fuid_dirty;

		zfs_fuid_txhold(zsb, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_NOWAIT);

		if (err == ERESTART)

	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 *
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	 */

	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));

	if (mask & (ATTR_UID|ATTR_GID)) {

		if (mask & ATTR_UID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
			    &new_uid, sizeof (new_uid));
			zp->z_uid = new_uid;

				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zsb), NULL, &new_uid,

				attrzp->z_uid = new_uid;
2771 if (mask & ATTR_GID) {
2772 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
2773 NULL, &new_gid, sizeof (new_gid));
2774 zp->z_gid = new_gid;
2776 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2777 SA_ZPL_GID(zsb), NULL, &new_gid,
2779 attrzp->z_gid = new_gid;
2782 if (!(mask & ATTR_MODE)) {
2783 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
2784 NULL, &new_mode, sizeof (new_mode));
2785 new_mode = zp->z_mode;
2787 err = zfs_acl_chown_setattr(zp);
2790 err = zfs_acl_chown_setattr(attrzp);
2795 if (mask & ATTR_MODE) {
2796 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
2797 &new_mode, sizeof (new_mode));
2798 zp->z_mode = new_mode;
2799 ASSERT3P(aclp, !=, NULL);
2800 err = zfs_aclset_common(zp, aclp, cr, tx);
2801 ASSERT3U(err, ==, 0);
2802 if (zp->z_acl_cached)
2803 zfs_acl_free(zp->z_acl_cached);
2804 zp->z_acl_cached = aclp;
2809 if (mask & ATTR_ATIME) {
2810 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
2811 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
2812 &zp->z_atime, sizeof (zp->z_atime));
2815 if (mask & ATTR_MTIME) {
2816 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
2817 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
2818 mtime, sizeof (mtime));
2821 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
2822 if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
2823 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
2824 NULL, mtime, sizeof (mtime));
2825 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
2826 &ctime, sizeof (ctime));
2827 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
2829 } else if (mask != 0) {
2830 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
2831 &ctime, sizeof (ctime));
2832 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
2835 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2836 SA_ZPL_CTIME(zsb), NULL,
2837 &ctime, sizeof (ctime));
2838 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
2839 mtime, ctime, B_TRUE);
2843 * Do this after setting timestamps to prevent timestamp
2844 * update from toggling bit
2847 if (xoap && (mask & ATTR_XVATTR)) {
2850 * Restore any masks that were trimmed off above
2851 * so that the return masks can be set for the caller.
2854 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
2855 XVA_SET_REQ(xvap, XAT_APPENDONLY);
2857 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
2858 XVA_SET_REQ(xvap, XAT_NOUNLINK);
2860 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
2861 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
2863 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
2864 XVA_SET_REQ(xvap, XAT_NODUMP);
2866 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
2867 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
2869 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
2870 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
2873 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2874 ASSERT(S_ISREG(ip->i_mode));
2876 zfs_xvattr_set(zp, xvap, tx);
2880 zfs_fuid_sync(zsb, tx);
2883 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2885 mutex_exit(&zp->z_lock);
2886 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2887 mutex_exit(&zp->z_acl_lock);
2890 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2891 mutex_exit(&attrzp->z_acl_lock);
2892 mutex_exit(&attrzp->z_lock);
2895 if (err == 0 && attrzp) {
2896 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
2907 zfs_fuid_info_free(fuidp);
2913 if (err == ERESTART)
2916 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2918 zfs_inode_update(zp);
2922 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
2923 zil_commit(zilog, 0);
2926 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
2927 kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
2928 kmem_free(tmpxvattr, sizeof (xvattr_t));
2932 EXPORT_SYMBOL(zfs_setattr);
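/*
 * Editor's illustrative sketch (not part of the original file): the
 * TXG_NOWAIT/ERESTART retry idiom that zfs_setattr() above and most
 * other vnode ops in this file follow.  The helper name
 * zfs_example_tx_retry() is hypothetical; real callers open-code the
 * loop so they can drop and re-acquire their locks between attempts.
 */
#if 0
static int
zfs_example_tx_retry(zfs_sb_t *zsb, znode_t *zp)
{
	dmu_tx_t *tx;
	int err;
top:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err) {
		if (err == ERESTART) {
			/* txg was full or suspended: wait and retry */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... modify on-disk state under the assigned tx ... */
	dmu_tx_commit(tx);
	return (0);
}
#endif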
2934 typedef struct zfs_zlock {
2935 krwlock_t *zl_rwlock; /* lock we acquired */
2936 znode_t *zl_znode; /* znode we held */
2937 struct zfs_zlock *zl_next; /* next in list */
2941 * Drop locks and release vnodes that were held by zfs_rename_lock().
2944 zfs_rename_unlock(zfs_zlock_t **zlpp)
2948 while ((zl = *zlpp) != NULL) {
2949 if (zl->zl_znode != NULL)
2950 iput(ZTOI(zl->zl_znode));
2951 rw_exit(zl->zl_rwlock);
2952 *zlpp = zl->zl_next;
2953 kmem_free(zl, sizeof (*zl));
2958 * Search back through the directory tree, using the ".." entries.
2959 * Lock each directory in the chain to prevent concurrent renames.
2960 * Fail any attempt to move a directory into one of its own descendants.
2961 * XXX - z_parent_lock can overlap with map or grow locks
2964 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2968 uint64_t rootid = ZTOZSB(zp)->z_root;
2969 uint64_t oidp = zp->z_id;
2970 krwlock_t *rwlp = &szp->z_parent_lock;
2971 krw_t rw = RW_WRITER;
2974 * First pass write-locks szp and compares to zp->z_id.
2975 * Later passes read-lock zp and compare to zp->z_parent.
2978 if (!rw_tryenter(rwlp, rw)) {
2980 * Another thread is renaming in this path.
2981 * Note that if we are a WRITER, we don't have any
2982 * parent_locks held yet.
2984 if (rw == RW_READER && zp->z_id > szp->z_id) {
2986 * Drop our locks and restart
2988 zfs_rename_unlock(&zl);
2992 rwlp = &szp->z_parent_lock;
2997 * Wait for other thread to drop its locks
3003 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3004 zl->zl_rwlock = rwlp;
3005 zl->zl_znode = NULL;
3006 zl->zl_next = *zlpp;
3009 if (oidp == szp->z_id) /* We're a descendant of szp */
3012 if (oidp == rootid) /* We've hit the top */
3015 if (rw == RW_READER) { /* i.e. not the first pass */
3016 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3021 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3022 &oidp, sizeof (oidp));
3023 rwlp = &zp->z_parent_lock;
3026 } while (zp->z_id != sdzp->z_id);
3032 * Move an entry from the provided source directory to the target
3033 * directory. Change the entry name as indicated.
3035 * IN: sdip - Source directory containing the "old entry".
3036 * snm - Old entry name.
3037 * tdip - Target directory to contain the "new entry".
3038 * tnm - New entry name.
3039 * cr - credentials of caller.
3040 * flags - case flags
3042 * RETURN: 0 if success
3043 * error code if failure
3046 * sdip,tdip - ctime|mtime updated
3050 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3051 cred_t *cr, int flags)
3053 znode_t *tdzp, *szp, *tzp;
3054 znode_t *sdzp = ITOZ(sdip);
3055 zfs_sb_t *zsb = ITOZSB(sdip);
3057 zfs_dirlock_t *sdl, *tdl;
3060 int cmp, serr, terr;
3065 ZFS_VERIFY_ZP(sdzp);
3068 if (tdip->i_sb != sdip->i_sb) {
3074 ZFS_VERIFY_ZP(tdzp);
3075 if (zsb->z_utf8 && u8_validate(tnm,
3076 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3081 if (flags & FIGNORECASE)
3090 * This is to prevent the creation of links into attribute space
3091 * by renaming a linked file into/out of an attribute directory.
3092 * See the comment in zfs_link() for why this is considered bad.
3094 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3100 * Lock source and target directory entries. To prevent deadlock,
3101 * a lock ordering must be defined. We lock the directory with
3102 * the smallest object id first, or if it's a tie, the one with
3103 * the lexically first name.
3105 if (sdzp->z_id < tdzp->z_id) {
3107 } else if (sdzp->z_id > tdzp->z_id) {
3111 * First compare the two name arguments without
3112 * considering any case folding.
3114 int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);
3116 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3117 ASSERT(error == 0 || !zsb->z_utf8);
3120 * POSIX: "If the old argument and the new argument
3121 * both refer to links to the same existing file,
3122 * the rename() function shall return successfully
3123 * and perform no other action."
3129 * If the file system is case-folding, then we may
3130 * have some more checking to do. A case-folding file
3131 * system is either supporting mixed case sensitivity
3132 * access or is completely case-insensitive. Note
3133 * that the file system is always case preserving.
3135 * In mixed sensitivity mode case sensitive behavior
3136 * is the default. FIGNORECASE must be used to
3137 * explicitly request case insensitive behavior.
3139 * If the source and target names provided differ only
3140 * by case (e.g., a request to rename 'tim' to 'Tim'),
3141 * we will treat this as a special case in the
3142 * case-insensitive mode: as long as the source name
3143 * is an exact match, we will allow this to proceed as
3144 * a name-change request.
3146 if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
3147 (zsb->z_case == ZFS_CASE_MIXED &&
3148 flags & FIGNORECASE)) &&
3149 u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
3152 * case preserving rename request, require exact
3161 * If the source and destination directories are the same, we should
3162 * grab the z_name_lock of that directory only once.
3166 rw_enter(&sdzp->z_name_lock, RW_READER);
3170 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3171 ZEXISTS | zflg, NULL, NULL);
3172 terr = zfs_dirent_lock(&tdl,
3173 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3175 terr = zfs_dirent_lock(&tdl,
3176 tdzp, tnm, &tzp, zflg, NULL, NULL);
3177 serr = zfs_dirent_lock(&sdl,
3178 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3184 * Source entry invalid or not there.
3187 zfs_dirent_unlock(tdl);
3193 rw_exit(&sdzp->z_name_lock);
3195 if (strcmp(snm, "..") == 0)
3201 zfs_dirent_unlock(sdl);
3205 rw_exit(&sdzp->z_name_lock);
3207 if (strcmp(tnm, "..") == 0)
3214 * Must have write access at the source to remove the old entry
3215 * and write access at the target to create the new entry.
3216 * Note that if target and source are the same, this can be
3217 * done in a single check.
3220 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3223 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3225 * Check to make sure rename is valid.
3226 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3228 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3233 * Does target exist?
3237 * Source and target must be the same type.
3239 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3240 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3245 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3251 * POSIX dictates that when the source and target
3252 * entries refer to the same file object, rename
3253 * must do nothing and exit without error.
3255 if (szp->z_id == tzp->z_id) {
3261 tx = dmu_tx_create(zsb->z_os);
3262 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3263 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3264 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3265 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3267 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3268 zfs_sa_upgrade_txholds(tx, tdzp);
3271 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3272 zfs_sa_upgrade_txholds(tx, tzp);
3275 zfs_sa_upgrade_txholds(tx, szp);
3276 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
3277 error = dmu_tx_assign(tx, TXG_NOWAIT);
3280 zfs_rename_unlock(&zl);
3281 zfs_dirent_unlock(sdl);
3282 zfs_dirent_unlock(tdl);
3285 rw_exit(&sdzp->z_name_lock);
3290 if (error == ERESTART) {
3300 if (tzp) /* Attempt to remove the existing target */
3301 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3304 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3306 szp->z_pflags |= ZFS_AV_MODIFIED;
3308 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
3309 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3310 ASSERT3U(error, ==, 0);
3312 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3314 zfs_log_rename(zilog, tx, TX_RENAME |
3315 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3316 sdl->dl_name, tdzp, tdl->dl_name, szp);
3319 * At this point, we have successfully created
3320 * the target name, but have failed to remove
3321 * the source name. Since the create was done
3322 * with the ZRENAMING flag, there are
3323 * complications; for one, the link count is
3324 * wrong. The easiest way to deal with this
3325 * is to remove the newly created target, and
3326 * return the original error. This must
3327 * succeed; fortunately, it is very unlikely to
3328 * fail, since we just created it.
3330 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3331 ZRENAMING, NULL), ==, 0);
3339 zfs_rename_unlock(&zl);
3341 zfs_dirent_unlock(sdl);
3342 zfs_dirent_unlock(tdl);
3344 zfs_inode_update(sdzp);
3346 rw_exit(&sdzp->z_name_lock);
3349 zfs_inode_update(tdzp);
3351 zfs_inode_update(szp);
3354 zfs_inode_update(tzp);
3358 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3359 zil_commit(zilog, 0);
3364 EXPORT_SYMBOL(zfs_rename);
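/*
 * Editor's illustrative sketch: the deadlock-avoiding lock ordering
 * zfs_rename() uses when taking the source and target directory name
 * locks -- smaller object id first, lexically smaller name on a tie.
 * The helper is hypothetical, and plain strcmp() stands in for the
 * u8_strcmp() call the real code makes with the file system's
 * normalization flags.
 */
#if 0
static boolean_t
zfs_example_lock_source_first(znode_t *sdzp, znode_t *tdzp,
    const char *snm, const char *tnm)
{
	if (sdzp->z_id != tdzp->z_id)
		return (sdzp->z_id < tdzp->z_id);
	/* Same directory: break the tie on the entry names. */
	return (strcmp(snm, tnm) < 0);
}
#endif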
3367 * Insert the indicated symbolic reference entry into the directory.
3369 * IN: dip - Directory to contain new symbolic link.
3370 * link - Name for new symlink entry.
3371 * vap - Attributes of new entry.
3372 * target - Target path of new symlink.
3374 * cr - credentials of caller.
3375 * flags - case flags
3377 * RETURN: 0 if success
3378 * error code if failure
3381 * dip - ctime|mtime updated
3385 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3386 struct inode **ipp, cred_t *cr, int flags)
3388 znode_t *zp, *dzp = ITOZ(dip);
3391 zfs_sb_t *zsb = ITOZSB(dip);
3393 uint64_t len = strlen(link);
3396 zfs_acl_ids_t acl_ids;
3397 boolean_t fuid_dirtied;
3398 uint64_t txtype = TX_SYMLINK;
3400 ASSERT(S_ISLNK(vap->va_mode));
3406 if (zsb->z_utf8 && u8_validate(name, strlen(name),
3407 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3411 if (flags & FIGNORECASE)
3414 if (len > MAXPATHLEN) {
3416 return (ENAMETOOLONG);
3419 if ((error = zfs_acl_ids_create(dzp, 0,
3420 vap, cr, NULL, &acl_ids)) != 0) {
3428 * Attempt to lock directory; fail if entry already exists.
3430 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3432 zfs_acl_ids_free(&acl_ids);
3437 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3438 zfs_acl_ids_free(&acl_ids);
3439 zfs_dirent_unlock(dl);
3444 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
3445 zfs_acl_ids_free(&acl_ids);
3446 zfs_dirent_unlock(dl);
3450 tx = dmu_tx_create(zsb->z_os);
3451 fuid_dirtied = zsb->z_fuid_dirty;
3452 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3453 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3454 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3455 ZFS_SA_BASE_ATTR_SIZE + len);
3456 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3457 if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3458 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3459 acl_ids.z_aclp->z_acl_bytes);
3462 zfs_fuid_txhold(zsb, tx);
3463 error = dmu_tx_assign(tx, TXG_NOWAIT);
3465 zfs_dirent_unlock(dl);
3466 if (error == ERESTART) {
3471 zfs_acl_ids_free(&acl_ids);
3478 * Create a new object for the symlink.
3479 * For version 4 ZPL datasets the symlink will be an SA attribute
3481 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3484 zfs_fuid_sync(zsb, tx);
3486 mutex_enter(&zp->z_lock);
3488 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
3491 zfs_sa_symlink(zp, link, len, tx);
3492 mutex_exit(&zp->z_lock);
3495 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
3496 &zp->z_size, sizeof (zp->z_size), tx);
3498 * Insert the new object into the directory.
3500 (void) zfs_link_create(dl, zp, tx, ZNEW);
3502 if (flags & FIGNORECASE)
3504 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3506 zfs_inode_update(dzp);
3507 zfs_inode_update(zp);
3509 zfs_acl_ids_free(&acl_ids);
3513 zfs_dirent_unlock(dl);
3517 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3518 zil_commit(zilog, 0);
3523 EXPORT_SYMBOL(zfs_symlink);
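/*
 * Editor's illustrative sketch: a hypothetical in-kernel caller of
 * zfs_symlink() above.  Real callers are the VFS symlink entry
 * points, which build the vattr from the request; the 0777 mode
 * reflects the usual convention that symlink permissions are unused.
 */
#if 0
static int
zfs_example_symlink(struct inode *dip, char *name, char *target)
{
	vattr_t va;
	struct inode *ip;
	int error;

	bzero(&va, sizeof (va));
	va.va_mask = ATTR_MODE;
	va.va_mode = S_IFLNK | 0777;
	error = zfs_symlink(dip, name, &va, target, &ip, CRED(), 0);
	if (error == 0)
		iput(ip);	/* drop the reference zfs_symlink returned */
	return (error);
}
#endif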
3526 * Return, in the buffer contained in the provided uio structure,
3527 * the symbolic path referred to by ip.
3529 * IN: ip - inode of symbolic link
3530 * uio - structure to contain the link path.
3531 * cr - credentials of caller.
3533 * RETURN: 0 if success
3534 * error code if failure
3537 * ip - atime updated
3541 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3543 znode_t *zp = ITOZ(ip);
3544 zfs_sb_t *zsb = ITOZSB(ip);
3550 mutex_enter(&zp->z_lock);
3552 error = sa_lookup_uio(zp->z_sa_hdl,
3553 SA_ZPL_SYMLINK(zsb), uio);
3555 error = zfs_sa_readlink(zp, uio);
3556 mutex_exit(&zp->z_lock);
3558 ZFS_ACCESSTIME_STAMP(zsb, zp);
3559 zfs_inode_update(zp);
3563 EXPORT_SYMBOL(zfs_readlink);
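/*
 * Editor's illustrative sketch: a hypothetical caller that reads a
 * link target into a kernel buffer via zfs_readlink().  The uio
 * construction assumes the usual Solaris/SPL uio_t fields; real
 * callers are the VFS readlink entry points.
 */
#if 0
static int
zfs_example_readlink(struct inode *ip, char *buf, size_t buflen)
{
	struct iovec iov;
	uio_t uio;

	iov.iov_base = buf;
	iov.iov_len = buflen;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_resid = buflen;
	return (zfs_readlink(ip, &uio, CRED()));
}
#endif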
3566 * Insert a new entry into directory tdip referencing sip.
3568 * IN: tdip - Directory to contain new entry.
3569 * sip - inode of new entry.
3570 * name - name of new entry.
3571 * cr - credentials of caller.
3573 * RETURN: 0 if success
3574 * error code if failure
3577 * tdip - ctime|mtime updated
3578 * sip - ctime updated
3582 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
3584 znode_t *dzp = ITOZ(tdip);
3586 zfs_sb_t *zsb = ITOZSB(tdip);
3595 ASSERT(S_ISDIR(tdip->i_mode));
3602 * POSIX dictates that we return EPERM here.
3603 * Better choices include ENOTSUP or EISDIR.
3605 if (S_ISDIR(sip->i_mode)) {
3610 if (sip->i_sb != tdip->i_sb) {
3618 /* Prevent links to .zfs/shares files */
3620 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
3621 &parent, sizeof (uint64_t))) != 0) {
3625 if (parent == zsb->z_shares_dir) {
3630 if (zsb->z_utf8 && u8_validate(name,
3631 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3635 #ifdef HAVE_PN_UTILS
3636 if (flags & FIGNORECASE)
3638 #endif /* HAVE_PN_UTILS */
3641 * We do not support links between attributes and non-attributes
3642 * because of the potential security risk of creating links
3643 * into "normal" file space in order to circumvent restrictions
3644 * imposed in attribute space.
3646 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3651 owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
3652 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3657 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3664 * Attempt to lock directory; fail if entry already exists.
3666 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3672 tx = dmu_tx_create(zsb->z_os);
3673 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3674 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3675 zfs_sa_upgrade_txholds(tx, szp);
3676 zfs_sa_upgrade_txholds(tx, dzp);
3677 error = dmu_tx_assign(tx, TXG_NOWAIT);
3679 zfs_dirent_unlock(dl);
3680 if (error == ERESTART) {
3690 error = zfs_link_create(dl, szp, tx, 0);
3693 uint64_t txtype = TX_LINK;
3694 #ifdef HAVE_PN_UTILS
3695 if (flags & FIGNORECASE)
3697 #endif /* HAVE_PN_UTILS */
3698 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
3703 zfs_dirent_unlock(dl);
3705 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3706 zil_commit(zilog, 0);
3708 zfs_inode_update(dzp);
3709 zfs_inode_update(szp);
3713 EXPORT_SYMBOL(zfs_link);
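/*
 * Editor's illustrative sketch: the attribute-space containment check
 * shared by zfs_link() and zfs_rename().  A new entry may only be
 * created in a directory that lives in the same namespace (regular or
 * extended-attribute) as the object it references.  Hypothetical
 * helper:
 */
#if 0
static boolean_t
zfs_example_same_namespace(znode_t *szp, znode_t *dzp)
{
	return ((szp->z_pflags & ZFS_XATTR) == (dzp->z_pflags & ZFS_XATTR));
}
#endif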
3717 * zfs_null_putapage() is used when the file system has been force
3718 * unmounted. It just drops the pages.
3722 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3723 size_t *lenp, int flags, cred_t *cr)
3725 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
3730 * Push a page out to disk, klustering if possible.
3732 * IN: vp - file to push page to.
3733 * pp - page to push.
3734 * flags - additional flags.
3735 * cr - credentials of caller.
3737 * OUT: offp - start of range pushed.
3738 * lenp - len of range pushed.
3740 * RETURN: 0 if success
3741 * error code if failure
3743 * NOTE: callers must have locked the page to be pushed. On
3744 * exit, the page (and all other pages in the kluster) must be
3749 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3750 size_t *lenp, int flags, cred_t *cr)
3752 znode_t *zp = VTOZ(vp);
3753 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3755 u_offset_t off, koff;
3762 * If our blocksize is bigger than the page size, try to kluster
3763 * multiple pages so that we write a full block (thus avoiding
3764 * a read-modify-write).
3766 if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
3767 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
3768 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
3769 ASSERT(koff <= zp->z_size);
3770 if (koff + klen > zp->z_size)
3771 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
3772 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
3774 ASSERT3U(btop(len), ==, btopr(len));
3777 * Can't push pages past end-of-file.
3779 if (off >= zp->z_size) {
3780 /* ignore all pages */
3783 } else if (off + len > zp->z_size) {
3784 int npages = btopr(zp->z_size - off);
3787 page_list_break(&pp, &trunc, npages);
3788 /* ignore pages past end of file */
3790 pvn_write_done(trunc, flags);
3791 len = zp->z_size - off;
3794 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
3795 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
3800 tx = dmu_tx_create(zfsvfs->z_os);
3801 dmu_tx_hold_write(tx, zp->z_id, off, len);
3803 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3804 zfs_sa_upgrade_txholds(tx, zp);
3805 err = dmu_tx_assign(tx, TXG_NOWAIT);
3807 if (err == ERESTART) {
3816 if (zp->z_blksz <= PAGESIZE) {
3817 caddr_t va = zfs_map_page(pp, S_READ);
3818 ASSERT3U(len, <=, PAGESIZE);
3819 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
3820 zfs_unmap_page(pp, va);
3822 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
3826 uint64_t mtime[2], ctime[2];
3827 sa_bulk_attr_t bulk[3];
3830 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3832 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3834 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3836 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3838 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
3843 pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
3853 * Copy the portion of the file indicated from pages into the file.
3854 * The pages are stored in a page list attached to the file's vnode.
3856 * IN: vp - vnode of file to push page data to.
3857 * off - position in file to put data.
3858 * len - amount of data to write.
3859 * flags - flags to control the operation.
3860 * cr - credentials of caller.
3861 * ct - caller context.
3863 * RETURN: 0 if success
3864 * error code if failure
3867 * vp - ctime|mtime updated
3871 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr)
3873 znode_t *zp = VTOZ(vp);
3874 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3886 * Align this request to the file block size in case we kluster.
3887 * XXX - this can result in pretty aggressive locking, which can
3888 * impact simultaneous read/write access. One option might be
3889 * to break up long requests (len == 0) into block-by-block
3890 * operations to get narrower locking.
3892 blksz = zp->z_blksz;
3894 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
3897 if (len > 0 && ISP2(blksz))
3898 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
3904 * Search the entire vp list for pages >= io_off.
3906 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
3907 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
3910 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
3912 if (off > zp->z_size) {
3913 /* past end of file */
3914 zfs_range_unlock(rl);
3919 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
3921 for (off = io_off; io_off < off + len; io_off += io_len) {
3922 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
3923 pp = page_lookup(vp, io_off,
3924 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
3926 pp = page_lookup_nowait(vp, io_off,
3927 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
3930 if (pp != NULL && pvn_getdirty(pp, flags)) {
3934 * Found a dirty page to push
3936 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
3944 zfs_range_unlock(rl);
3945 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3946 zil_commit(zfsvfs->z_log, zp->z_id);
3950 #endif /* HAVE_MMAP */
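/*
 * Editor's illustrative sketch: the kluster bounds arithmetic from
 * zfs_putapage() above, factored into a hypothetical helper.  For
 * example, with an 8K block size, 4K pages and off = 12288, klen =
 * 8192 and koff = P2ALIGN(12288, 8192) = 8192, so both pages covering
 * the block [8192, 16384) are written together and a read-modify-write
 * of the block is avoided.
 */
#if 0
static void
zfs_example_kluster_bounds(znode_t *zp, u_offset_t off,
    u_offset_t *koffp, size_t *klenp)
{
	size_t klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
	u_offset_t koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;

	/* Never kluster past end-of-file. */
	if (koff + klen > zp->z_size)
		klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
	*koffp = koff;
	*klenp = klen;
}
#endif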
3954 zfs_inactive(struct inode *ip)
3956 znode_t *zp = ITOZ(ip);
3957 zfs_sb_t *zsb = ITOZSB(ip);
3960 #ifdef HAVE_SNAPSHOT
3961 /* Early return for snapshot inode? */
3962 #endif /* HAVE_SNAPSHOT */
3964 rw_enter(&zsb->z_teardown_inactive_lock, RW_READER);
3965 if (zp->z_sa_hdl == NULL) {
3966 rw_exit(&zsb->z_teardown_inactive_lock);
3970 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
3971 dmu_tx_t *tx = dmu_tx_create(zsb->z_os);
3973 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3974 zfs_sa_upgrade_txholds(tx, zp);
3975 error = dmu_tx_assign(tx, TXG_WAIT);
3979 mutex_enter(&zp->z_lock);
3980 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb),
3981 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
3982 zp->z_atime_dirty = 0;
3983 mutex_exit(&zp->z_lock);
3989 rw_exit(&zsb->z_teardown_inactive_lock);
3991 EXPORT_SYMBOL(zfs_inactive);
3994 * Bounds-check the seek operation.
3996 * IN: ip - inode seeking within
3997 * ooff - old file offset
3998 * noffp - pointer to new file offset
3999 * ct - caller context
4001 * RETURN: 0 if success
4002 * EINVAL if new offset invalid
4006 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4008 if (S_ISDIR(ip->i_mode))
4010 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4012 EXPORT_SYMBOL(zfs_seek);
4016 * Pre-filter the generic locking function to trap attempts to place
4017 * a mandatory lock on a memory mapped file.
4020 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4021 flk_callback_t *flk_cbp, cred_t *cr)
4023 znode_t *zp = VTOZ(vp);
4024 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4030 * We are following the UFS semantics with respect to mapcnt
4031 * here: If we see that the file is mapped already, then we will
4032 * return an error, but we don't worry about races between this
4033 * function and zfs_map().
4035 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4040 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4044 * If we can't find a page in the cache, we will create a new page
4045 * and fill it with file data. For efficiency, we may try to fill
4046 * multiple pages at once (klustering) to fill up the supplied page
4047 * list. Note that the pages to be filled are held with an exclusive
4048 * lock to prevent access by other threads while they are being filled.
4051 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4052 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4054 znode_t *zp = VTOZ(vp);
4055 page_t *pp, *cur_pp;
4056 objset_t *os = zp->z_zfsvfs->z_os;
4057 u_offset_t io_off, total;
4061 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4063 * We only have a single page, don't bother klustering
4067 pp = page_create_va(vp, io_off, io_len,
4068 PG_EXCL | PG_WAIT, seg, addr);
4071 * Try to find enough pages to fill the page list
4073 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4074 &io_len, off, plsz, 0);
4078 * The page already exists, nothing to do here.
4085 * Fill the pages in the kluster.
4088 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4091 ASSERT3U(io_off, ==, cur_pp->p_offset);
4092 va = zfs_map_page(cur_pp, S_WRITE);
4093 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4095 zfs_unmap_page(cur_pp, va);
4097 /* On error, toss the entire kluster */
4098 pvn_read_done(pp, B_ERROR);
4099 /* convert checksum errors into IO errors */
4104 cur_pp = cur_pp->p_next;
4108 * Fill in the page list array from the kluster starting
4109 * from the desired offset `off'.
4110 * NOTE: the page list will always be null terminated.
4112 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4113 ASSERT(pl == NULL || (*pl)->p_offset == off);
4119 * Return pointers to the pages for the file region [off, off + len]
4120 * in the pl array. If plsz is greater than len, this function may
4121 * also return page pointers from after the specified region
4122 * (i.e. the region [off, off + plsz]). These additional pages are
4123 * only returned if they are already in the cache, or were created as
4124 * part of a klustered read.
4126 * IN: vp - vnode of file to get data from.
4127 * off - position in file to get data from.
4128 * len - amount of data to retrieve.
4129 * plsz - length of provided page list.
4130 * seg - segment to obtain pages for.
4131 * addr - virtual address of fault.
4132 * rw - mode of created pages.
4133 * cr - credentials of caller.
4134 * ct - caller context.
4136 * OUT: protp - protection mode of created pages.
4137 * pl - list of pages created.
4139 * RETURN: 0 if success
4140 * error code if failure
4143 * vp - atime updated
4147 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4148 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4149 enum seg_rw rw, cred_t *cr)
4151 znode_t *zp = VTOZ(vp);
4152 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4156 /* we do our own caching, faultahead is unnecessary */
4159 else if (len > plsz)
4162 len = P2ROUNDUP(len, PAGESIZE);
4163 ASSERT(plsz >= len);
4172 * Loop through the requested range [off, off + len) looking
4173 * for pages. If we don't find a page, we will need to create
4174 * a new page and fill it with data from the file.
4177 if (*pl = page_lookup(vp, off, SE_SHARED))
4179 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4182 ASSERT3U((*pl)->p_offset, ==, off);
4186 ASSERT3U(len, >=, PAGESIZE);
4189 ASSERT3U(plsz, >=, PAGESIZE);
4196 * Fill out the page array with any pages already in the cache.
4199 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4206 * Release any pages we have previously locked.
4211 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4221 * Request a memory map for a section of a file. This code interacts
4222 * with common code and the VM system as follows:
4224 * common code calls mmap(), which ends up in smmap_common()
4226 * this calls VOP_MAP(), which takes you into (say) zfs
4228 * zfs_map() calls as_map(), passing segvn_create() as the callback
4230 * segvn_create() creates the new segment and calls VOP_ADDMAP()
4232 * zfs_addmap() updates z_mapcnt
4236 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4237 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
4239 znode_t *zp = VTOZ(vp);
4240 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4241 segvn_crargs_t vn_a;
4247 if ((prot & PROT_WRITE) && (zp->z_pflags &
4248 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4253 if ((prot & (PROT_READ | PROT_EXEC)) &&
4254 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4259 if (vp->v_flag & VNOMAP) {
4264 if (off < 0 || len > MAXOFFSET_T - off) {
4269 if (vp->v_type != VREG) {
4275 * If file is locked, disallow mapping.
4277 if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4283 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4291 vn_a.offset = (u_offset_t)off;
4292 vn_a.type = flags & MAP_TYPE;
4294 vn_a.maxprot = maxprot;
4297 vn_a.flags = flags & ~MAP_TYPE;
4299 vn_a.lgrp_mem_policy_flags = 0;
4301 error = as_map(as, *addrp, len, segvn_create, &vn_a);
4310 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4311 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
4313 uint64_t pages = btopr(len);
4315 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4320 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4321 * more accurate mtime for the associated file. Since we don't have a way of
4322 * detecting when the data was actually modified, we have to resort to
4323 * heuristics. If an explicit msync() is done, then we mark the mtime when the
4324 * last page is pushed. The problem occurs when the msync() call is omitted,
4325 * which is by far the most common case:
4333 * putpage() via fsflush
4335 * If we wait until fsflush to come along, we can have a modification time that
4336 * is some arbitrary point in the future. In order to prevent this in the
4337 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
4342 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4343 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
4345 uint64_t pages = btopr(len);
4347 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4348 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4350 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4351 vn_has_cached_data(vp))
4352 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4356 #endif /* HAVE_MMAP */
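/*
 * Editor's illustrative sketch: the mtime heuristic described above
 * zfs_delmap(), restated as a hypothetical predicate.  Dirty pages
 * are pushed at unmap time only for writable shared mappings that
 * still have cached data, so mtime reflects the unmap rather than an
 * arbitrarily later fsflush pass.
 */
#if 0
static boolean_t
zfs_example_delmap_should_flush(vnode_t *vp, uint_t prot, uint_t flags)
{
	return ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
	    vn_has_cached_data(vp));
}
#endif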
4359 * convoff - converts the given data (start, whence) to the
4363 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4368 if ((lckdat->l_whence == 2) || (whence == 2)) {
4369 if ((error = zfs_getattr(ip, &vap, 0, CRED())) != 0)
4373 switch (lckdat->l_whence) {
4375 lckdat->l_start += offset;
4378 lckdat->l_start += vap.va_size;
4386 if (lckdat->l_start < 0)
4391 lckdat->l_start -= offset;
4394 lckdat->l_start -= vap.va_size;
4402 lckdat->l_whence = (short)whence;
4407 * Free or allocate space in a file. Currently, this function only
4408 * supports the `F_FREESP' command. However, this command is somewhat
4409 * misnamed, as its functionality includes the ability to allocate as
4410 * well as free space.
4412 * IN: ip - inode of file to free data in.
4413 * cmd - action to take (only F_FREESP supported).
4414 * bfp - section of file to free/alloc.
4415 * flag - current file open mode flags.
4416 * offset - current file offset.
4417 * cr - credentials of caller [UNUSED].
4419 * RETURN: 0 if success
4420 * error code if failure
4423 * ip - ctime|mtime updated
4427 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4428 offset_t offset, cred_t *cr)
4430 znode_t *zp = ITOZ(ip);
4431 zfs_sb_t *zsb = ITOZSB(ip);
4438 if (cmd != F_FREESP) {
4443 if ((error = convoff(ip, bfp, 0, offset))) {
4448 if (bfp->l_len < 0) {
4454 len = bfp->l_len; /* 0 means from off to end of file */
4456 error = zfs_freesp(zp, off, len, flag, TRUE);
4461 EXPORT_SYMBOL(zfs_space);
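/*
 * Editor's illustrative sketch: a hypothetical caller that punches a
 * hole with zfs_space() above.  F_FREESP frees the byte range
 * [l_start, l_start + l_len); l_len == 0 means "from l_start to end
 * of file".
 */
#if 0
static int
zfs_example_punch_hole(struct inode *ip, offset_t off, offset_t len)
{
	flock64_t bf;

	bf.l_type = F_WRLCK;
	bf.l_whence = 0;	/* l_start is an absolute offset */
	bf.l_start = off;
	bf.l_len = len;
	bf.l_pid = 0;
	return (zfs_space(ip, F_FREESP, &bf, FWRITE, 0, CRED()));
}
#endif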
4465 zfs_fid(struct inode *ip, fid_t *fidp)
4467 znode_t *zp = ITOZ(ip);
4468 zfs_sb_t *zsb = ITOZSB(ip);
4471 uint64_t object = zp->z_id;
4478 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
4479 &gen64, sizeof (uint64_t))) != 0) {
4484 gen = (uint32_t)gen64;
4486 size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
4487 if (fidp->fid_len < size) {
4488 fidp->fid_len = size;
4493 zfid = (zfid_short_t *)fidp;
4495 zfid->zf_len = size;
4497 for (i = 0; i < sizeof (zfid->zf_object); i++)
4498 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4500 /* Must have a non-zero generation number to distinguish from .zfs */
4503 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4504 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4506 if (size == LONG_FID_LEN) {
4507 uint64_t objsetid = dmu_objset_id(zsb->z_os);
4510 zlfid = (zfid_long_t *)fidp;
4512 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4513 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4515 /* XXX - this should be the generation number for the objset */
4516 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4517 zlfid->zf_setgen[i] = 0;
4523 EXPORT_SYMBOL(zfs_fid);
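/*
 * Editor's illustrative sketch: the inverse of the byte-wise encoding
 * in zfs_fid() above -- reassemble the object number from the
 * little-endian zf_object array.  The helper name is hypothetical.
 */
#if 0
static uint64_t
zfs_example_fid_object(const zfid_short_t *zfid)
{
	uint64_t object = 0;
	int i;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		object |= (uint64_t)zfid->zf_object[i] << (8 * i);
	return (object);
}
#endif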
4527 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4529 znode_t *zp = ITOZ(ip);
4530 zfs_sb_t *zsb = ITOZSB(ip);
4532 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4536 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4541 EXPORT_SYMBOL(zfs_getsecattr);
4545 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4547 znode_t *zp = ITOZ(ip);
4548 zfs_sb_t *zsb = ITOZSB(ip);
4550 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4551 zilog_t *zilog = zsb->z_log;
4556 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4558 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
4559 zil_commit(zilog, 0);
4564 EXPORT_SYMBOL(zfs_setsecattr);
4566 #ifdef HAVE_UIO_ZEROCOPY
4568 * Tunable, both must be a power of 2.
4570 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4571 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4572 * an arcbuf for a partial block read
4574 int zcr_blksz_min = (1 << 10); /* 1K */
4575 int zcr_blksz_max = (1 << 17); /* 128K */
4579 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4581 znode_t *zp = ITOZ(ip);
4582 zfs_sb_t *zsb = ITOZSB(ip);
4583 int max_blksz = zsb->z_max_blksz;
4584 uio_t *uio = &xuio->xu_uio;
4585 ssize_t size = uio->uio_resid;
4586 offset_t offset = uio->uio_loffset;
4591 int preamble, postamble;
4593 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4601 * Loan out an arc_buf for write if write size is bigger than
4602 * max_blksz, and the file's block size is also max_blksz.
4605 if (size < blksz || zp->z_blksz != blksz) {
4610 * Caller requests buffers for write before knowing where the
4611 * write offset might be (e.g. NFS TCP write).
4616 preamble = P2PHASE(offset, blksz);
4618 preamble = blksz - preamble;
4623 postamble = P2PHASE(size, blksz);
4626 fullblk = size / blksz;
4627 (void) dmu_xuio_init(xuio,
4628 (preamble != 0) + fullblk + (postamble != 0));
4631 * Have to fix iov base/len for partial buffers. They
4632 * currently represent full arc_buf's.
4635 /* data begins in the middle of the arc_buf */
4636 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4639 (void) dmu_xuio_add(xuio, abuf,
4640 blksz - preamble, preamble);
4643 for (i = 0; i < fullblk; i++) {
4644 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4647 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4651 /* data ends in the middle of the arc_buf */
4652 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4655 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4660 * Loan out an arc_buf for read if the read size is larger than
4661 * the current file block size. Block alignment is not
4662 * considered. Partial arc_buf will be loaned out for read.
4664 blksz = zp->z_blksz;
4665 if (blksz < zcr_blksz_min)
4666 blksz = zcr_blksz_min;
4667 if (blksz > zcr_blksz_max)
4668 blksz = zcr_blksz_max;
4669 /* avoid potential complexity of dealing with it */
4670 if (blksz > max_blksz) {
4675 maxsize = zp->z_size - uio->uio_loffset;
4689 uio->uio_extflg = UIO_XUIO;
4690 XUIO_XUZC_RW(xuio) = ioflag;
4697 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4701 int ioflag = XUIO_XUZC_RW(xuio);
4703 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4705 i = dmu_xuio_cnt(xuio);
4707 abuf = dmu_xuio_arcbuf(xuio, i);
4709 * if abuf == NULL, it must be a write buffer
4710 * that has been returned in zfs_write().
4713 dmu_return_arcbuf(abuf);
4714 ASSERT(abuf || ioflag == UIO_WRITE);
4717 dmu_xuio_fini(xuio);
4720 #endif /* HAVE_UIO_ZEROCOPY */
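/*
 * Editor's illustrative sketch: the preamble/postamble arithmetic in
 * zfs_reqzcbuf() above, as a hypothetical helper returning the number
 * of arc_bufs dmu_xuio_init() is asked for.  E.g. with blksz = 1024,
 * offset = 1536 and size = 4096: preamble = 1024 - P2PHASE(1536, 1024)
 * = 512, postamble = P2PHASE(4096 - 512, 1024) = 512, and
 * (4096 - 512 - 512) / 1024 = 3 full blocks, giving 1 + 3 + 1 = 5.
 */
#if 0
static int
zfs_example_xuio_bufcnt(offset_t offset, ssize_t size, int blksz)
{
	int preamble = P2PHASE(offset, blksz);
	int postamble;

	if (preamble)
		preamble = blksz - preamble;
	size -= preamble;
	postamble = P2PHASE(size, blksz);
	size -= postamble;

	return ((preamble != 0) + (int)(size / blksz) + (postamble != 0));
}
#endif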
4722 #if defined(_KERNEL) && defined(HAVE_SPL)
4723 module_param(zfs_read_chunk_size, long, 0644);
4724 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");