X-Git-Url: https://git.camperquake.de/gitweb.cgi?a=blobdiff_plain;f=module%2Fzfs%2Fzfs_vnops.c;h=876d44b3563dae38e3989c632c356d2b7b9637b1;hb=refs%2Fheads%2Frertzinger%2Ffeature-zpool-get--p;hp=8e0037e37da527f5c1b4840996e3c506d5749c64;hpb=172bb4bd5e4afef721dd4d2972d8680d983f144b;p=zfs.git diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 8e0037e..876d44b 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -19,11 +19,13 @@ * CDDL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved. - * Use is subject to license terms. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012 by Delphix. All rights reserved. */ /* Portions Copyright 2007 Jeremy Teo */ +/* Portions Copyright 2010 Robert Milkowski */ + #include #include @@ -33,7 +35,6 @@ #include #include #include -#include #include #include #include @@ -41,13 +42,7 @@ #include #include #include -#include -#include #include -#include -#include -#include -#include #include #include #include @@ -57,24 +52,29 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include -#include #include +#include #include "fs/fs_subr.h" #include #include +#include +#include #include #include #include #include -#include +#include #include +#include /* * Programming rules. @@ -88,12 +88,12 @@ * to freed memory. The example below illustrates the following Big Rules: * * (1) A check must be made in each zfs thread for a mounted file system. - * This is done avoiding races using ZFS_ENTER(zfsvfs). - * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes + * This is done avoiding races using ZFS_ENTER(zsb). + * A ZFS_EXIT(zsb) is needed before all returns. Any znodes * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros * can return EIO from the calling function. * - * (2) VN_RELE() should always be the last thing except for zil_commit() + * (2) iput() should always be the last thing except for zil_commit() * (if necessary) and ZFS_EXIT(). This is for 3 reasons: * First, if it's the last reference, the vnode/znode * can be freed, so the zp may point to freed memory. Second, the last @@ -101,13 +101,12 @@ * pushing cached pages (which acquires range locks) and syncing out * cached atime changes. Third, zfs_zinactive() may require a new tx, * which could deadlock the system if you were already holding one. + * If you must call iput() within a tx then use iput_ASYNC(). * * (3) All range locks must be grabbed before calling dmu_tx_assign(), * as they can span dmu_tx_assign() calls. * - * (4) Always pass zfsvfs->z_assign as the second argument to dmu_tx_assign(). - * In normal operation, this will be TXG_NOWAIT. During ZIL replay, - * it will be a specific txg. Either way, dmu_tx_assign() never blocks. + * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign(). * This is critical because we don't want to block while holding locks. * Note, in particular, that if a lock is sometimes acquired before * the tx assigns, and sometimes after (e.g. z_lock), then failing to @@ -118,39 +117,41 @@ * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open() * forever, because the previous txg can't quiesce until B's tx commits. * - * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT, + * If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT, * then drop all locks, call dmu_tx_wait(), and try again. 
* * (5) If the operation succeeded, generate the intent log entry for it * before dropping locks. This ensures that the ordering of events * in the intent log matches the order in which they actually occurred. + * During ZIL replay the zfs_log_* functions will update the sequence + * number to indicate the zil transaction has replayed. * * (6) At the end of each vnode op, the DMU tx must always commit, * regardless of whether there were any errors. * - * (7) After dropping all locks, invoke zil_commit(zilog, seq, foid) + * (7) After dropping all locks, invoke zil_commit(zilog, foid) * to ensure that synchronous semantics are provided when necessary. * * In general, this is how things should be ordered in each vnode op: * - * ZFS_ENTER(zfsvfs); // exit if unmounted + * ZFS_ENTER(zsb); // exit if unmounted * top: - * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD()) + * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab()) * rw_enter(...); // grab any other locks you need * tx = dmu_tx_create(...); // get DMU tx * dmu_tx_hold_*(); // hold each object you might modify - * error = dmu_tx_assign(tx, zfsvfs->z_assign); // try to assign + * error = dmu_tx_assign(tx, TXG_NOWAIT); // try to assign * if (error) { * rw_exit(...); // drop locks * zfs_dirent_unlock(dl); // unlock directory entry - * VN_RELE(...); // release held vnodes - * if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + * iput(...); // release held vnodes + * if (error == ERESTART) { * dmu_tx_wait(tx); * dmu_tx_abort(tx); * goto top; * } * dmu_tx_abort(tx); // abort DMU tx - * ZFS_EXIT(zfsvfs); // finished in zfs + * ZFS_EXIT(zsb); // finished in zfs * return (error); // really out of space * } * error = do_real_work(); // do whatever this VOP does @@ -159,101 +160,111 @@ * dmu_tx_commit(tx); // commit DMU tx -- error or not * rw_exit(...); // drop locks * zfs_dirent_unlock(dl); // unlock directory entry - * VN_RELE(...); // release held vnodes - * zil_commit(zilog, seq, foid); // synchronous when necessary - * ZFS_EXIT(zfsvfs); // finished in zfs + * iput(...); // release held vnodes + * zil_commit(zilog, foid); // synchronous when necessary + * ZFS_EXIT(zsb); // finished in zfs * return (error); // done, report error */ -/* ARGSUSED */ +/* + * Virus scanning is unsupported. It would be possible to add a hook + * here to performance the required virus scan. This could be done + * entirely in the kernel or potentially as an update to invoke a + * scanning utility. 
+ */ static int -zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct) +zfs_vscan(struct inode *ip, cred_t *cr, int async) +{ + return (0); +} + +/* ARGSUSED */ +int +zfs_open(struct inode *ip, int mode, int flag, cred_t *cr) { - znode_t *zp = VTOZ(*vpp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - if ((flag & FWRITE) && (zp->z_phys->zp_flags & ZFS_APPENDONLY) && - ((flag & FAPPEND) == 0)) { - ZFS_EXIT(zfsvfs); + /* Honor ZFS_APPENDONLY file attribute */ + if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) && + ((flag & O_APPEND) == 0)) { + ZFS_EXIT(zsb); return (EPERM); } - if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && - ZTOV(zp)->v_type == VREG && - !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) && - zp->z_phys->zp_size > 0) { - if (fs_vscan(*vpp, cr, 0) != 0) { - ZFS_EXIT(zfsvfs); + /* Virus scan eligible files on open */ + if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) && + !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) { + if (zfs_vscan(ip, cr, 0) != 0) { + ZFS_EXIT(zsb); return (EACCES); } } /* Keep a count of the synchronous opens in the znode */ - if (flag & (FSYNC | FDSYNC)) + if (flag & O_SYNC) atomic_inc_32(&zp->z_sync_cnt); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (0); } +EXPORT_SYMBOL(zfs_open); /* ARGSUSED */ -static int -zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr, - caller_context_t *ct) +int +zfs_close(struct inode *ip, int flag, cred_t *cr) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - /* Decrement the synchronous opens in the znode */ - if ((flag & (FSYNC | FDSYNC)) && (count == 1)) - atomic_dec_32(&zp->z_sync_cnt); - /* - * Clean up any locks held by this process on the vp. + * Zero the synchronous opens in the znode. Under Linux the + * zfs_close() hook is not symmetric with zfs_open(), it is + * only called once when the last reference is dropped. */ - cleanlocks(vp, ddi_get_pid(), 0); - cleanshares(vp, ddi_get_pid()); + if (flag & O_SYNC) + zp->z_sync_cnt = 0; - if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && - ZTOV(zp)->v_type == VREG && - !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) && - zp->z_phys->zp_size > 0) - VERIFY(fs_vscan(vp, cr, 1) == 0); + if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) && + !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) + VERIFY(zfs_vscan(ip, cr, 1) == 0); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (0); } +EXPORT_SYMBOL(zfs_close); +#if defined(SEEK_HOLE) && defined(SEEK_DATA) /* - * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and - * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter. + * Lseek support for finding holes (cmd == SEEK_HOLE) and + * data (cmd == SEEK_DATA). "off" is an in/out parameter. 
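For context, SEEK_HOLE and SEEK_DATA are the whence values that lseek(2) exposes for sparse files, and this is the code path that serves them on Linux. A minimal user-space sketch of walking the data regions of a file (the path is only an assumed example, and both the kernel and the filesystem must support hole seeking):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd = open("/tmp/sparse-file", O_RDONLY);	/* assumed test file */
		off_t data, hole = 0;

		if (fd < 0)
			return (1);

		for (;;) {
			data = lseek(fd, hole, SEEK_DATA);	/* next data at or after 'hole' */
			if (data < 0)
				break;				/* ENXIO: no more data */
			hole = lseek(fd, data, SEEK_HOLE);	/* hole (or EOF) ending the region */
			printf("data: %lld..%lld\n", (long long)data, (long long)hole);
		}
		close(fd);
		return (0);
	}

As in zfs_holey_common(), a file with no holes still reports an implicit hole at end of file.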
*/ static int -zfs_holey(vnode_t *vp, int cmd, offset_t *off) +zfs_holey_common(struct inode *ip, int cmd, loff_t *off) { - znode_t *zp = VTOZ(vp); + znode_t *zp = ITOZ(ip); uint64_t noff = (uint64_t)*off; /* new offset */ uint64_t file_sz; int error; boolean_t hole; - file_sz = zp->z_phys->zp_size; + file_sz = zp->z_size; if (noff >= file_sz) { return (ENXIO); } - if (cmd == _FIO_SEEK_HOLE) + if (cmd == SEEK_HOLE) hole = B_TRUE; else hole = B_FALSE; - error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff); + error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff); /* end of file? */ if ((error == ESRCH) || (noff > file_sz)) { @@ -273,131 +284,69 @@ zfs_holey(vnode_t *vp, int cmd, offset_t *off) return (error); } -/* ARGSUSED */ -static int -zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, - int *rvalp, caller_context_t *ct) +int +zfs_holey(struct inode *ip, int cmd, loff_t *off) { - offset_t off; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); int error; - zfsvfs_t *zfsvfs; - znode_t *zp; - - switch (com) { - case _FIOFFS: - return (zfs_sync(vp->v_vfsp, 0, cred)); - - /* - * The following two ioctls are used by bfu. Faking out, - * necessary to avoid bfu errors. - */ - case _FIOGDIO: - case _FIOSDIO: - return (0); - - case _FIO_SEEK_DATA: - case _FIO_SEEK_HOLE: - if (ddi_copyin((void *)data, &off, sizeof (off), flag)) - return (EFAULT); - - zp = VTOZ(vp); - zfsvfs = zp->z_zfsvfs; - ZFS_ENTER(zfsvfs); - ZFS_VERIFY_ZP(zp); - /* offset parameter is in/out */ - error = zfs_holey(vp, com, &off); - ZFS_EXIT(zfsvfs); - if (error) - return (error); - if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) - return (EFAULT); - return (0); - } - return (ENOTTY); -} + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); -/* - * Utility functions to map and unmap a single physical page. These - * are used to manage the mappable copies of ZFS file data, and therefore - * do not update ref/mod bits. - */ -caddr_t -zfs_map_page(page_t *pp, enum seg_rw rw) -{ - if (kpm_enable) - return (hat_kpm_mapin(pp, 0)); - ASSERT(rw == S_READ || rw == S_WRITE); - return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), - (caddr_t)-1)); -} + error = zfs_holey_common(ip, cmd, off); -void -zfs_unmap_page(page_t *pp, caddr_t addr) -{ - if (kpm_enable) { - hat_kpm_mapout(pp, 0, addr); - } else { - ppmapout(addr); - } + ZFS_EXIT(zsb); + return (error); } +EXPORT_SYMBOL(zfs_holey); +#endif /* SEEK_HOLE && SEEK_DATA */ +#if defined(_KERNEL) /* * When a file is memory mapped, we must keep the IO data synchronized * between the DMU cache and the memory mapped pages. What this means: * * On Write: If we find a memory mapped page, we write to *both* * the page and the dmu buffer. - * - * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when - * the file is memory mapped. */ -static int -mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx) +static void +update_pages(struct inode *ip, int64_t start, int len, + objset_t *os, uint64_t oid) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - int64_t start, off; - int len = nbytes; - int error = 0; - - start = uio->uio_loffset; - off = start & PAGEOFFSET; - for (start &= PAGEMASK; len > 0; start += PAGESIZE) { - page_t *pp; - uint64_t bytes = MIN(PAGESIZE - off, len); - uint64_t woff = uio->uio_loffset; - - /* - * We don't want a new page to "appear" in the middle of - * the file update (because it may not get the write - * update data), so we grab a lock to block - * zfs_getpage(). 
- */ - rw_enter(&zp->z_map_lock, RW_WRITER); - if (pp = page_lookup(vp, start, SE_SHARED)) { - caddr_t va; - - rw_exit(&zp->z_map_lock); - va = zfs_map_page(pp, S_WRITE); - error = uiomove(va+off, bytes, UIO_WRITE, uio); - if (error == 0) { - dmu_write(zfsvfs->z_os, zp->z_id, - woff, bytes, va+off, tx); - } - zfs_unmap_page(pp, va); - page_unlock(pp); - } else { - error = dmu_write_uio(zfsvfs->z_os, zp->z_id, - uio, bytes, tx); - rw_exit(&zp->z_map_lock); + struct address_space *mp = ip->i_mapping; + struct page *pp; + uint64_t nbytes; + int64_t off; + void *pb; + + off = start & (PAGE_CACHE_SIZE-1); + for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) { + nbytes = MIN(PAGE_CACHE_SIZE - off, len); + + pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT); + if (pp) { + if (mapping_writably_mapped(mp)) + flush_dcache_page(pp); + + pb = kmap(pp); + (void) dmu_read(os, oid, start+off, nbytes, pb+off, + DMU_READ_PREFETCH); + kunmap(pp); + + if (mapping_writably_mapped(mp)) + flush_dcache_page(pp); + + mark_page_accessed(pp); + SetPageUptodate(pp); + ClearPageError(pp); + unlock_page(pp); + page_cache_release(pp); } - len -= bytes; + + len -= nbytes; off = 0; - if (error) - break; } - return (error); } /* @@ -411,30 +360,41 @@ mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx) * the file is memory mapped. */ static int -mappedread(vnode_t *vp, int nbytes, uio_t *uio) +mappedread(struct inode *ip, int nbytes, uio_t *uio) { - znode_t *zp = VTOZ(vp); - objset_t *os = zp->z_zfsvfs->z_os; + struct address_space *mp = ip->i_mapping; + struct page *pp; + znode_t *zp = ITOZ(ip); + objset_t *os = ITOZSB(ip)->z_os; int64_t start, off; + uint64_t bytes; int len = nbytes; int error = 0; + void *pb; start = uio->uio_loffset; - off = start & PAGEOFFSET; - for (start &= PAGEMASK; len > 0; start += PAGESIZE) { - page_t *pp; - uint64_t bytes = MIN(PAGESIZE - off, len); - - if (pp = page_lookup(vp, start, SE_SHARED)) { - caddr_t va; - - va = zfs_map_page(pp, S_READ); - error = uiomove(va + off, bytes, UIO_READ, uio); - zfs_unmap_page(pp, va); - page_unlock(pp); + off = start & (PAGE_CACHE_SIZE-1); + for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) { + bytes = MIN(PAGE_CACHE_SIZE - off, len); + + pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT); + if (pp) { + ASSERT(PageUptodate(pp)); + + pb = kmap(pp); + error = uiomove(pb + off, bytes, UIO_READ, uio); + kunmap(pp); + + if (mapping_writably_mapped(mp)) + flush_dcache_page(pp); + + mark_page_accessed(pp); + unlock_page(pp); + page_cache_release(pp); } else { error = dmu_read_uio(os, zp->z_id, uio, bytes); } + len -= bytes; off = 0; if (error) @@ -442,18 +402,19 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio) } return (error); } +#endif /* _KERNEL */ -offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ +unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */ /* * Read bytes from specified file into supplied buffer. * - * IN: vp - vnode of file to be read from. + * IN: ip - inode of file to be read from. * uio - structure supplying read location, range info, * and return buffer. - * ioflag - SYNC flags; used to provide FRSYNC semantics. + * ioflag - FSYNC flags; used to provide FRSYNC semantics. + * O_DIRECT flag; used to bypass page cache. * cr - credentials of caller. - * ct - caller context * * OUT: uio - updated offset and range, buffer filled. 
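Taken together, update_pages() and mappedread() above are what keep ordinary read(2)/write(2) traffic coherent with pages a process already has mapped. A user-space sketch of the behavior this provides (the path is an assumption; any regular file on the filesystem would do):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd = open("/tank/fs/testfile", O_CREAT | O_RDWR, 0644);	/* assumed path */
		char *map;

		if (fd < 0)
			return (1);
		(void) ftruncate(fd, 4096);	/* make sure one full page exists */

		map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return (1);

		(void) pwrite(fd, "hello", 5, 0);	/* write through the descriptor */

		/*
		 * update_pages() copies the new bytes into the cached page, so the
		 * existing mapping observes "hello" without being re-established.
		 */
		printf("%.5s\n", map);

		munmap(map, 4096);
		close(fd);
		return (0);
	}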
* @@ -461,25 +422,28 @@ offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ * error code if failure * * Side Effects: - * vp - atime updated if byte count > 0 + * inode - atime updated if byte count > 0 */ /* ARGSUSED */ -static int -zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) +int +zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); objset_t *os; ssize_t n, nbytes; - int error; + int error = 0; rl_t *rl; +#ifdef HAVE_UIO_ZEROCOPY + xuio_t *xuio = NULL; +#endif /* HAVE_UIO_ZEROCOPY */ - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - os = zfsvfs->z_os; + os = zsb->z_os; - if (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) { - ZFS_EXIT(zfsvfs); + if (zp->z_pflags & ZFS_AV_QUARANTINED) { + ZFS_EXIT(zsb); return (EACCES); } @@ -487,7 +451,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * Validate file offset */ if (uio->uio_loffset < (offset_t)0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EINVAL); } @@ -495,26 +459,24 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * Fasttrack empty reads */ if (uio->uio_resid == 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (0); } /* * Check for mandatory locks */ - if (MANDMODE((mode_t)zp->z_phys->zp_mode)) { - if (error = chklock(vp, FREAD, - uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { - ZFS_EXIT(zfsvfs); - return (error); - } + if (mandatory_lock(ip) && + !lock_may_read(ip, uio->uio_loffset, uio->uio_resid)) { + ZFS_EXIT(zsb); + return (EAGAIN); } /* * If we're in FRSYNC mode, sync out this znode before reading it. */ - if (ioflag & FRSYNC) - zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); + if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zsb->z_log, zp->z_id); /* * Lock the range against changes. @@ -525,22 +487,54 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * If we are reading past end-of-file we can skip * to the end; but we might still need to set atime. */ - if (uio->uio_loffset >= zp->z_phys->zp_size) { + if (uio->uio_loffset >= zp->z_size) { error = 0; goto out; } - ASSERT(uio->uio_loffset < zp->z_phys->zp_size); - n = MIN(uio->uio_resid, zp->z_phys->zp_size - uio->uio_loffset); + ASSERT(uio->uio_loffset < zp->z_size); + n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset); + +#ifdef HAVE_UIO_ZEROCOPY + if ((uio->uio_extflg == UIO_XUIO) && + (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) { + int nblk; + int blksz = zp->z_blksz; + uint64_t offset = uio->uio_loffset; + + xuio = (xuio_t *)uio; + if ((ISP2(blksz))) { + nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset, + blksz)) / blksz; + } else { + ASSERT(offset + n <= blksz); + nblk = 1; + } + (void) dmu_xuio_init(xuio, nblk); + + if (vn_has_cached_data(ip)) { + /* + * For simplicity, we always allocate a full buffer + * even if we only expect to read a portion of a block. 
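To make the zero-copy sizing above concrete: the request is simply rounded out to whole blocks. A worked sketch with arbitrary values (the P2ALIGN/P2ROUNDUP definitions are repeated here only for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define	P2ALIGN(x, align)	((x) & -(align))
	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

	int
	main(void)
	{
		uint64_t blksz = 131072;	/* zp->z_blksz, e.g. a 128K recordsize */
		uint64_t offset = 100000;	/* uio->uio_loffset */
		uint64_t n = 300000;		/* bytes requested */
		uint64_t nblk;

		/* Same arithmetic as the ISP2(blksz) branch above. */
		nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset, blksz)) / blksz;
		printf("%llu blocks\n", (unsigned long long)nblk);	/* prints 4 */
		return (0);
	}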
+ */ + while (--nblk >= 0) { + (void) dmu_xuio_add(xuio, + dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), + blksz), 0, blksz); + } + } + } +#endif /* HAVE_UIO_ZEROCOPY */ while (n > 0) { nbytes = MIN(n, zfs_read_chunk_size - P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); - if (vn_has_cached_data(vp)) - error = mappedread(vp, nbytes, uio); + if (zp->z_is_mapped && !(ioflag & O_DIRECT)) + error = mappedread(ip, nbytes, uio); else error = dmu_read_uio(os, zp->z_id, uio, nbytes); + if (error) { /* convert checksum errors into IO errors */ if (error == ECKSUM) @@ -550,24 +544,25 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) n -= nbytes; } - out: zfs_range_unlock(rl); - ZFS_ACCESSTIME_STAMP(zfsvfs, zp); - ZFS_EXIT(zfsvfs); + ZFS_ACCESSTIME_STAMP(zsb, zp); + zfs_inode_update(zp); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_read); /* * Write the bytes to a file. * - * IN: vp - vnode of file to be written to. + * IN: ip - inode of file to be written to. * uio - structure supplying write location, range info, * and data buffer. * ioflag - FAPPEND flag set if in append mode. + * O_DIRECT flag; used to bypass page cache. * cr - credentials of caller. - * ct - caller context (NFS/CIFS fem monitor only) * * OUT: uio - updated offset and range. * @@ -575,26 +570,36 @@ out: * error code if failure * * Timestamps: - * vp - ctime|mtime updated if byte count > 0 + * ip - ctime|mtime updated if byte count > 0 */ + /* ARGSUSED */ -static int -zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) +int +zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) { - znode_t *zp = VTOZ(vp); - rlim64_t limit = uio->uio_llimit; + znode_t *zp = ITOZ(ip); + rlim64_t limit = uio->uio_limit; ssize_t start_resid = uio->uio_resid; ssize_t tx_bytes; uint64_t end_size; dmu_tx_t *tx; - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + zfs_sb_t *zsb = ZTOZSB(zp); zilog_t *zilog; offset_t woff; ssize_t n, nbytes; rl_t *rl; - int max_blksz = zfsvfs->z_max_blksz; - uint64_t pflags; - int error; + int max_blksz = zsb->z_max_blksz; + int error = 0; + arc_buf_t *abuf; + iovec_t *aiov = NULL; + xuio_t *xuio = NULL; + int i_iov = 0; + iovec_t *iovp = uio->uio_iov; + int write_eof; + int count = 0; + sa_bulk_attr_t bulk[4]; + uint64_t mtime[2], ctime[2]; + ASSERTV(int iovcnt = uio->uio_iovcnt); /* * Fasttrack empty write @@ -606,84 +611,99 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) limit = MAXOFFSET_T; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + &zp->z_pflags, 8); + /* * If immutable or not appending then return EPERM */ - pflags = zp->z_phys->zp_flags; - if ((pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || - ((pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && - (uio->uio_loffset < zp->z_phys->zp_size))) { - ZFS_EXIT(zfsvfs); + if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || + ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && + (uio->uio_loffset < zp->z_size))) { + ZFS_EXIT(zsb); return (EPERM); } - zilog = zfsvfs->z_log; + zilog = zsb->z_log; + + /* + * Validate file offset + */ + woff = ioflag & FAPPEND ? 
zp->z_size : uio->uio_loffset; + if (woff < 0) { + ZFS_EXIT(zsb); + return (EINVAL); + } + + /* + * Check for mandatory locks before calling zfs_range_lock() + * in order to prevent a deadlock with locks set via fcntl(). + */ + if (mandatory_lock(ip) && !lock_may_write(ip, woff, n)) { + ZFS_EXIT(zsb); + return (EAGAIN); + } +#ifdef HAVE_UIO_ZEROCOPY /* * Pre-fault the pages to ensure slow (eg NFS) pages * don't hold up txg. + * Skip this if uio contains loaned arc_buf. */ - uio_prefaultpages(n, uio); + if ((uio->uio_extflg == UIO_XUIO) && + (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) + xuio = (xuio_t *)uio; + else + uio_prefaultpages(MIN(n, max_blksz), uio); +#endif /* HAVE_UIO_ZEROCOPY */ /* * If in append mode, set the io offset pointer to eof. */ if (ioflag & FAPPEND) { /* - * Range lock for a file append: - * The value for the start of range will be determined by - * zfs_range_lock() (to guarantee append semantics). - * If this write will cause the block size to increase, - * zfs_range_lock() will lock the entire file, so we must - * later reduce the range after we grow the block size. + * Obtain an appending range lock to guarantee file append + * semantics. We reset the write offset once we have the lock. */ rl = zfs_range_lock(zp, 0, n, RL_APPEND); + woff = rl->r_off; if (rl->r_len == UINT64_MAX) { - /* overlocked, zp_size can't change */ - woff = uio->uio_loffset = zp->z_phys->zp_size; - } else { - woff = uio->uio_loffset = rl->r_off; + /* + * We overlocked the file because this write will cause + * the file block size to increase. + * Note that zp_size cannot change with this lock held. + */ + woff = zp->z_size; } + uio->uio_loffset = woff; } else { - woff = uio->uio_loffset; - /* - * Validate file offset - */ - if (woff < 0) { - ZFS_EXIT(zfsvfs); - return (EINVAL); - } - /* - * If we need to grow the block size then zfs_range_lock() - * will lock a wider range than we request here. - * Later after growing the block size we reduce the range. + * Note that if the file block size will change as a result of + * this write, then this range lock will lock the entire file + * so that we can re-write the block safely. */ rl = zfs_range_lock(zp, woff, n, RL_WRITER); } if (woff >= limit) { zfs_range_unlock(rl); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EFBIG); } if ((woff + n) > limit || woff > (limit - n)) n = limit - woff; - /* - * Check for mandatory locks - */ - if (MANDMODE((mode_t)zp->z_phys->zp_mode) && - (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { - zfs_range_unlock(rl); - ZFS_EXIT(zfsvfs); - return (error); - } - end_size = MAX(zp->z_phys->zp_size, woff + n); + /* Will this write extend the file length? */ + write_eof = (woff + n > zp->z_size); + + end_size = MAX(zp->z_size, woff + n); /* * Write the file in reasonable size chunks. Each chunk is written @@ -691,22 +711,68 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * and allows us to do more fine-grained space accounting. 
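The chunking below is easy to see with a quick sketch: the first chunk is trimmed so the write reaches the next block boundary, after which the loop proceeds in whole max_blksz pieces (values are arbitrary; P2PHASE is repeated only for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define	P2PHASE(x, align)	((x) & ((align) - 1))
	#define	MIN(a, b)		((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		uint64_t max_blksz = 131072;	/* zsb->z_max_blksz */
		uint64_t woff = 130000;		/* unaligned starting offset */
		uint64_t n = 300000;		/* bytes left to write */

		while (n > 0) {
			uint64_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

			printf("write %llu bytes at %llu\n",
			    (unsigned long long)nbytes, (unsigned long long)woff);
			woff += nbytes;
			n -= nbytes;
		}
		return (0);	/* chunks: 1072, 131072, 131072, 36784 */
	}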
*/ while (n > 0) { + abuf = NULL; + woff = uio->uio_loffset; +again: + if (zfs_owner_overquota(zsb, zp, B_FALSE) || + zfs_owner_overquota(zsb, zp, B_TRUE)) { + if (abuf != NULL) + dmu_return_arcbuf(abuf); + error = EDQUOT; + break; + } + + if (xuio && abuf == NULL) { + ASSERT(i_iov < iovcnt); + aiov = &iovp[i_iov]; + abuf = dmu_xuio_arcbuf(xuio, i_iov); + dmu_xuio_clear(xuio, i_iov); + ASSERT((aiov->iov_base == abuf->b_data) || + ((char *)aiov->iov_base - (char *)abuf->b_data + + aiov->iov_len == arc_buf_size(abuf))); + i_iov++; + } else if (abuf == NULL && n >= max_blksz && + woff >= zp->z_size && + P2PHASE(woff, max_blksz) == 0 && + zp->z_blksz == max_blksz) { + /* + * This write covers a full block. "Borrow" a buffer + * from the dmu so that we can fill it before we enter + * a transaction. This avoids the possibility of + * holding up the transaction if the data copy hangs + * up on a pagefault (e.g., from an NFS server mapping). + */ + size_t cbytes; + + abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), + max_blksz); + ASSERT(abuf != NULL); + ASSERT(arc_buf_size(abuf) == max_blksz); + if ((error = uiocopy(abuf->b_data, max_blksz, + UIO_WRITE, uio, &cbytes))) { + dmu_return_arcbuf(abuf); + break; + } + ASSERT(cbytes == max_blksz); + } + /* * Start a transaction. */ - woff = uio->uio_loffset; - tx = dmu_tx_create(zfsvfs->z_os); - dmu_tx_hold_bonus(tx, zp->z_id); + tx = dmu_tx_create(zsb->z_os); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz)); - error = dmu_tx_assign(tx, zfsvfs->z_assign); + zfs_sa_upgrade_txholds(tx, zp); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { - if (error == ERESTART && - zfsvfs->z_assign == TXG_NOWAIT) { + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); - continue; + goto again; } dmu_tx_abort(tx); + if (abuf != NULL) + dmu_return_arcbuf(abuf); break; } @@ -734,24 +800,47 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * Perhaps we should use SPA_MAXBLOCKSIZE chunks? */ nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz)); - rw_enter(&zp->z_map_lock, RW_READER); - tx_bytes = uio->uio_resid; - if (vn_has_cached_data(vp)) { - rw_exit(&zp->z_map_lock); - error = mappedwrite(vp, nbytes, uio, tx); - } else { - error = dmu_write_uio(zfsvfs->z_os, zp->z_id, + if (abuf == NULL) { + tx_bytes = uio->uio_resid; + error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl), uio, nbytes, tx); - rw_exit(&zp->z_map_lock); + tx_bytes -= uio->uio_resid; + } else { + tx_bytes = nbytes; + ASSERT(xuio == NULL || tx_bytes == aiov->iov_len); + /* + * If this is not a full block write, but we are + * extending the file past EOF and this data starts + * block-aligned, use assign_arcbuf(). Otherwise, + * write via dmu_write(). + */ + if (tx_bytes < max_blksz && (!write_eof || + aiov->iov_base != abuf->b_data)) { + ASSERT(xuio); + dmu_write(zsb->z_os, zp->z_id, woff, + aiov->iov_len, aiov->iov_base, tx); + dmu_return_arcbuf(abuf); + xuio_stat_wbuf_copied(); + } else { + ASSERT(xuio || tx_bytes == max_blksz); + dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl), + woff, abuf, tx); + } + ASSERT(tx_bytes <= uio->uio_resid); + uioskip(uio, tx_bytes); } - tx_bytes -= uio->uio_resid; + + if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) + update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id); /* * If we made no progress, we're done. If we made even * partial progress, update the znode and ZIL accordingly. 
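The "borrowed" ARC buffer path in the loop above only fires for aligned, full-block writes that extend the file. Restated as a standalone predicate, purely as a sketch of the conditions already shown (P2PHASE repeated for illustration):

	#include <stdint.h>

	#define	P2PHASE(x, align)	((x) & ((align) - 1))

	int
	can_borrow_full_block(uint64_t woff, uint64_t n, uint64_t z_size,
	    uint64_t z_blksz, uint64_t max_blksz)
	{
		return (n >= max_blksz &&		/* at least a full block of data left */
		    woff >= z_size &&			/* extending the file, not rewriting it */
		    P2PHASE(woff, max_blksz) == 0 &&	/* write starts on a block boundary */
		    z_blksz == max_blksz);		/* block size has already grown to max */
	}

Filling the loaned buffer with uiocopy() before dmu_tx_assign() keeps a possible page fault (for example against an NFS-backed mapping) out of the open transaction.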
*/ if (tx_bytes == 0) { + (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb), + (void *)&zp->z_size, sizeof (uint64_t), tx); dmu_tx_commit(tx); ASSERT(error != 0); break; @@ -769,29 +858,41 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * user 0 is not an ephemeral uid. */ mutex_enter(&zp->z_acl_lock); - if ((zp->z_phys->zp_mode & (S_IXUSR | (S_IXUSR >> 3) | + if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 && - (zp->z_phys->zp_mode & (S_ISUID | S_ISGID)) != 0 && + (zp->z_mode & (S_ISUID | S_ISGID)) != 0 && secpolicy_vnode_setid_retain(cr, - (zp->z_phys->zp_mode & S_ISUID) != 0 && - zp->z_phys->zp_uid == 0) != 0) { - zp->z_phys->zp_mode &= ~(S_ISUID | S_ISGID); + (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) { + uint64_t newmode; + zp->z_mode &= ~(S_ISUID | S_ISGID); + newmode = zp->z_mode; + (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb), + (void *)&newmode, sizeof (uint64_t), tx); } mutex_exit(&zp->z_acl_lock); - /* - * Update time stamp. NOTE: This marks the bonus buffer as - * dirty, so we don't have to do it again for zp_size. - */ - zfs_time_stamper(zp, CONTENT_MODIFIED, tx); + zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, + B_TRUE); /* * Update the file size (zp_size) if it has changed; * account for possible concurrent updates. */ - while ((end_size = zp->z_phys->zp_size) < uio->uio_loffset) - (void) atomic_cas_64(&zp->z_phys->zp_size, end_size, + while ((end_size = zp->z_size) < uio->uio_loffset) { + (void) atomic_cas_64(&zp->z_size, end_size, uio->uio_loffset); + ASSERT(error == 0); + } + /* + * If we are replaying and eof is non zero then force + * the file size to the specified eof. Note, there's no + * concurrency during replay. + */ + if (zsb->z_replay && zsb->z_replay_eof != 0) + zp->z_size = zsb->z_replay_eof; + + error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); + zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); dmu_tx_commit(tx); @@ -799,6 +900,9 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) break; ASSERT(tx_bytes == nbytes); n -= nbytes; + + if (!xuio && n > 0) + uio_prefaultpages(MIN(n, max_blksz), uio); } zfs_range_unlock(rl); @@ -807,61 +911,96 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) * If we're in replay mode, or we made no progress, return error. * Otherwise, it's at least a partial write, so it's successful. 
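The setid handling in the loop above mirrors the usual Linux rule that an unprivileged write to an executable file clears S_ISUID/S_ISGID. A user-space sketch of the observable effect (the path is an assumption, and a process holding CAP_FSETID would keep the bits):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int
	main(void)
	{
		const char *path = "/tank/fs/setuid-test";	/* assumed writable location */
		struct stat st;
		int fd = open(path, O_CREAT | O_RDWR, 0755);

		if (fd < 0)
			return (1);

		(void) fchmod(fd, 04755);	/* set S_ISUID on our own file */
		(void) write(fd, "x", 1);	/* unprivileged write */
		(void) fstat(fd, &st);

		/* The setuid bit is gone after the write. */
		printf("mode after write: %04o\n", st.st_mode & 07777);	/* 0755 */
		close(fd);
		(void) unlink(path);
		return (0);
	}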
*/ - if (zfsvfs->z_assign >= TXG_INITIAL || uio->uio_resid == start_resid) { - ZFS_EXIT(zfsvfs); + if (zsb->z_replay || uio->uio_resid == start_resid) { + ZFS_EXIT(zsb); return (error); } - if (ioflag & (FSYNC | FDSYNC)) - zil_commit(zilog, zp->z_last_itx, zp->z_id); + if (ioflag & (FSYNC | FDSYNC) || + zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, zp->z_id); - ZFS_EXIT(zfsvfs); + zfs_inode_update(zp); + ZFS_EXIT(zsb); return (0); } +EXPORT_SYMBOL(zfs_write); + +static void +iput_async(struct inode *ip, taskq_t *taskq) +{ + ASSERT(atomic_read(&ip->i_count) > 0); + if (atomic_read(&ip->i_count) == 1) + taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_PUSHPAGE); + else + iput(ip); +} void -zfs_get_done(dmu_buf_t *db, void *vzgd) +zfs_get_done(zgd_t *zgd, int error) { - zgd_t *zgd = (zgd_t *)vzgd; - rl_t *rl = zgd->zgd_rl; - vnode_t *vp = ZTOV(rl->r_zp); + znode_t *zp = zgd->zgd_private; + objset_t *os = ZTOZSB(zp)->z_os; + + if (zgd->zgd_db) + dmu_buf_rele(zgd->zgd_db, zgd); + + zfs_range_unlock(zgd->zgd_rl); + + /* + * Release the vnode asynchronously as we currently have the + * txg stopped from syncing. + */ + iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os))); + + if (error == 0 && zgd->zgd_bp) + zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); - dmu_buf_rele(db, vzgd); - zfs_range_unlock(rl); - VN_RELE(vp); - zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); kmem_free(zgd, sizeof (zgd_t)); } +#ifdef DEBUG +static int zil_fault_io = 0; +#endif + /* * Get data to generate a TX_WRITE intent log record. */ int zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) { - zfsvfs_t *zfsvfs = arg; - objset_t *os = zfsvfs->z_os; + zfs_sb_t *zsb = arg; + objset_t *os = zsb->z_os; znode_t *zp; - uint64_t off = lr->lr_offset; + uint64_t object = lr->lr_foid; + uint64_t offset = lr->lr_offset; + uint64_t size = lr->lr_length; + blkptr_t *bp = &lr->lr_blkptr; dmu_buf_t *db; - rl_t *rl; zgd_t *zgd; - int dlen = lr->lr_length; /* length of user data */ int error = 0; - ASSERT(zio); - ASSERT(dlen != 0); + ASSERT(zio != NULL); + ASSERT(size != 0); /* * Nothing to do if the file has been removed */ - if (zfs_zget(zfsvfs, lr->lr_foid, &zp) != 0) + if (zfs_zget(zsb, object, &zp) != 0) return (ENOENT); if (zp->z_unlinked) { - VN_RELE(ZTOV(zp)); + /* + * Release the vnode asynchronously as we currently have the + * txg stopped from syncing. + */ + iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os))); return (ENOENT); } + zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE); + zgd->zgd_zilog = zsb->z_log; + zgd->zgd_private = zp; + /* * Write records come in two flavors: immediate and indirect. * For small writes it's cheaper to store the data with the @@ -870,16 +1009,16 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) * we don't have to write the data twice. 
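The zil_commit() above is what makes an O_SYNC write durable before write(2) returns, and the ZFS_SYNC_ALWAYS check means the sync=always dataset property forces every write down the same path. Small records are logged together with their data (the "immediate" flavor described below); larger ones are logged by reference and later fetched through zfs_get_data(). Seen from user space, as a sketch (the path is an assumption):

	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* Assumed path; an O_SYNC open requests the synchronous write path above. */
		int fd = open("/tank/fs/journal.log", O_CREAT | O_WRONLY | O_SYNC, 0644);

		if (fd < 0)
			return (1);

		/* Does not return until the ZIL (or a txg sync) has made it durable. */
		(void) write(fd, "record\n", 7);
		close(fd);
		return (0);
	}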
*/ if (buf != NULL) { /* immediate write */ - rl = zfs_range_lock(zp, off, dlen, RL_READER); + zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER); /* test for truncation needs to be done while range locked */ - if (off >= zp->z_phys->zp_size) { + if (offset >= zp->z_size) { error = ENOENT; - goto out; + } else { + error = dmu_read(os, object, offset, size, buf, + DMU_READ_NO_PREFETCH); } - VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf)); + ASSERT(error == 0 || error == ENOENT); } else { /* indirect write */ - uint64_t boff; /* block starting offset */ - /* * Have to lock the whole block to ensure when it's * written out and it's checksum is being calculated @@ -887,63 +1026,71 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) * blocksize after we get the lock in case it's changed! */ for (;;) { - if (ISP2(zp->z_blksz)) { - boff = P2ALIGN_TYPED(off, zp->z_blksz, - uint64_t); - } else { - boff = 0; - } - dlen = zp->z_blksz; - rl = zfs_range_lock(zp, boff, dlen, RL_READER); - if (zp->z_blksz == dlen) + uint64_t blkoff; + size = zp->z_blksz; + blkoff = ISP2(size) ? P2PHASE(offset, size) : offset; + offset -= blkoff; + zgd->zgd_rl = zfs_range_lock(zp, offset, size, + RL_READER); + if (zp->z_blksz == size) break; - zfs_range_unlock(rl); + offset += blkoff; + zfs_range_unlock(zgd->zgd_rl); } /* test for truncation needs to be done while range locked */ - if (off >= zp->z_phys->zp_size) { + if (lr->lr_offset >= zp->z_size) error = ENOENT; - goto out; +#ifdef DEBUG + if (zil_fault_io) { + error = EIO; + zil_fault_io = 0; } - zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP); - zgd->zgd_rl = rl; - zgd->zgd_zilog = zfsvfs->z_log; - zgd->zgd_bp = &lr->lr_blkptr; - VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db)); - ASSERT(boff == db->db_offset); - lr->lr_blkoff = off - boff; - error = dmu_sync(zio, db, &lr->lr_blkptr, - lr->lr_common.lrc_txg, zfs_get_done, zgd); - ASSERT((error && error != EINPROGRESS) || - lr->lr_length <= zp->z_blksz); +#endif if (error == 0) - zil_add_block(zfsvfs->z_log, &lr->lr_blkptr); - /* - * If we get EINPROGRESS, then we need to wait for a - * write IO initiated by dmu_sync() to complete before - * we can release this dbuf. We will finish everything - * up in the zfs_get_done() callback. - */ - if (error == EINPROGRESS) - return (0); - dmu_buf_rele(db, zgd); - kmem_free(zgd, sizeof (zgd_t)); + error = dmu_buf_hold(os, object, offset, zgd, &db, + DMU_READ_NO_PREFETCH); + + if (error == 0) { + zgd->zgd_db = db; + zgd->zgd_bp = bp; + + ASSERT(db->db_offset == offset); + ASSERT(db->db_size == size); + + error = dmu_sync(zio, lr->lr_common.lrc_txg, + zfs_get_done, zgd); + ASSERT(error || lr->lr_length <= zp->z_blksz); + + /* + * On success, we need to wait for the write I/O + * initiated by dmu_sync() to complete before we can + * release this dbuf. We will finish everything up + * in the zfs_get_done() callback. 
+ */ + if (error == 0) + return (0); + + if (error == EALREADY) { + lr->lr_common.lrc_txtype = TX_WRITE2; + error = 0; + } + } } -out: - zfs_range_unlock(rl); - VN_RELE(ZTOV(zp)); + + zfs_get_done(zgd, error); + return (error); } /*ARGSUSED*/ -static int -zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, - caller_context_t *ct) +int +zfs_access(struct inode *ip, int mode, int flag, cred_t *cr) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); int error; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); if (flag & V_ACE_MASK) @@ -951,25 +1098,23 @@ zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, else error = zfs_zaccess_rwx(zp, mode, flag, cr); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_access); /* * Lookup an entry in a directory, or an extended attribute directory. - * If it exists, return a held vnode reference for it. + * If it exists, return a held inode reference for it. * - * IN: dvp - vnode of directory to search. + * IN: dip - inode of directory to search. * nm - name of entry to lookup. - * pnp - full pathname to lookup [UNUSED]. * flags - LOOKUP_XATTR set if looking for an attribute. - * rdir - root directory vnode [UNUSED]. * cr - credentials of caller. - * ct - caller context * direntflags - directory lookup flags * realpnp - returned pathname. * - * OUT: vpp - vnode of located entry, NULL if not found. + * OUT: ipp - inode of located entry, NULL if not found. * * RETURN: 0 if success * error code if failure @@ -978,40 +1123,70 @@ zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, * NA */ /* ARGSUSED */ -static int -zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, - int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, - int *direntflags, pathname_t *realpnp) +int +zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags, + cred_t *cr, int *direntflags, pathname_t *realpnp) { - znode_t *zdp = VTOZ(dvp); - zfsvfs_t *zfsvfs = zdp->z_zfsvfs; - int error; + znode_t *zdp = ITOZ(dip); + zfs_sb_t *zsb = ITOZSB(dip); + int error = 0; - ZFS_ENTER(zfsvfs); - ZFS_VERIFY_ZP(zdp); + /* fast path */ + if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) { - *vpp = NULL; + if (!S_ISDIR(dip->i_mode)) { + return (ENOTDIR); + } else if (zdp->z_sa_hdl == NULL) { + return (EIO); + } - if (flags & LOOKUP_XATTR) { - /* - * If the xattr property is off, refuse the lookup request. - */ - if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { - ZFS_EXIT(zfsvfs); - return (EINVAL); + if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) { + error = zfs_fastaccesschk_execute(zdp, cr); + if (!error) { + *ipp = dip; + igrab(*ipp); + return (0); + } + return (error); +#ifdef HAVE_DNLC + } else { + vnode_t *tvp = dnlc_lookup(dvp, nm); + + if (tvp) { + error = zfs_fastaccesschk_execute(zdp, cr); + if (error) { + iput(tvp); + return (error); + } + if (tvp == DNLC_NO_VNODE) { + iput(tvp); + return (ENOENT); + } else { + *vpp = tvp; + return (specvp_check(vpp, cr)); + } + } +#endif /* HAVE_DNLC */ } + } + + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zdp); + + *ipp = NULL; + if (flags & LOOKUP_XATTR) { /* * We don't allow recursive attributes.. * Maybe someday we will. 
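zfs_access() above is the generic permission check: with V_ACE_MASK set the mode carries native ACE_* access bits, otherwise plain rwx bits. The closest user-space analogue of the rwx flavor is access(2)/faccessat(2), sketched here with an assumed path:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		const char *path = "/tank/fs/protected";	/* assumed path */

		/* Probe against the real uid/gid, the historical access(2) behavior. */
		if (access(path, R_OK | W_OK) != 0)
			perror("access");

		/* AT_EACCESS probes against the effective ids instead. */
		if (faccessat(AT_FDCWD, path, X_OK, AT_EACCESS) != 0)
			perror("faccessat");

		return (0);
	}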
*/ - if (zdp->z_phys->zp_flags & ZFS_XATTR) { - ZFS_EXIT(zfsvfs); + if (zdp->z_pflags & ZFS_XATTR) { + ZFS_EXIT(zsb); return (EINVAL); } - if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { - ZFS_EXIT(zfsvfs); + if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) { + ZFS_EXIT(zsb); return (error); } @@ -1019,18 +1194,18 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, * Do we have permission to get into attribute directory? */ - if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, - B_FALSE, cr)) { - VN_RELE(*vpp); - *vpp = NULL; + if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0, + B_FALSE, cr))) { + iput(*ipp); + *ipp = NULL; } - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } - if (dvp->v_type != VDIR) { - ZFS_EXIT(zfsvfs); + if (!S_ISDIR(dip->i_mode)) { + ZFS_EXIT(zsb); return (ENOTDIR); } @@ -1038,132 +1213,111 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, * Check accessibility of directory. */ - if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { - ZFS_EXIT(zfsvfs); + if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) { + ZFS_EXIT(zsb); return (error); } - if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), + if (zsb->z_utf8 && u8_validate(nm, strlen(nm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EILSEQ); } - error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); - if (error == 0) { - /* - * Convert device special files - */ - if (IS_DEVVP(*vpp)) { - vnode_t *svp; - - svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); - VN_RELE(*vpp); - if (svp == NULL) - error = ENOSYS; - else - *vpp = svp; - } - } + error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp); + if ((error == 0) && (*ipp)) + zfs_inode_update(ITOZ(*ipp)); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_lookup); /* * Attempt to create a new entry in a directory. If the entry * already exists, truncate the file if permissible, else return - * an error. Return the vp of the created or trunc'd file. + * an error. Return the ip of the created or trunc'd file. * - * IN: dvp - vnode of directory to put new file entry in. + * IN: dip - inode of directory to put new file entry in. * name - name of new file entry. * vap - attributes of new file. * excl - flag indicating exclusive or non-exclusive mode. * mode - mode to open file with. * cr - credentials of caller. * flag - large file flag [UNUSED]. - * ct - caller context - * vsecp - ACL to be set + * vsecp - ACL to be set * - * OUT: vpp - vnode of created or trunc'd entry. + * OUT: ipp - inode of created or trunc'd entry. 
* * RETURN: 0 if success * error code if failure * * Timestamps: - * dvp - ctime|mtime updated if new entry created - * vp - ctime|mtime always, atime if new + * dip - ctime|mtime updated if new entry created + * ip - ctime|mtime always, atime if new */ /* ARGSUSED */ -static int -zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, - int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, - vsecattr_t *vsecp) +int +zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl, + int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp) { - znode_t *zp, *dzp = VTOZ(dvp); - zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + znode_t *zp, *dzp = ITOZ(dip); + zfs_sb_t *zsb = ITOZSB(dip); zilog_t *zilog; objset_t *os; zfs_dirlock_t *dl; dmu_tx_t *tx; int error; - zfs_acl_t *aclp = NULL; - zfs_fuid_info_t *fuidp = NULL; - ksid_t *ksid; uid_t uid; - gid_t gid = crgetgid(cr); + gid_t gid; + zfs_acl_ids_t acl_ids; + boolean_t fuid_dirtied; + boolean_t have_acl = B_FALSE; /* * If we have an ephemeral id, ACL, or XVATTR then * make sure file system is at proper version */ - ksid = crgetsid(cr, KSID_OWNER); - if (ksid) - uid = ksid_getid(ksid); - else - uid = crgetuid(cr); + gid = crgetgid(cr); + uid = crgetuid(cr); - if (zfsvfs->z_use_fuids == B_FALSE && - (vsecp || (vap->va_mask & AT_XVATTR) || - IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) + if (zsb->z_use_fuids == B_FALSE && + (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) return (EINVAL); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(dzp); - os = zfsvfs->z_os; - zilog = zfsvfs->z_log; + os = zsb->z_os; + zilog = zsb->z_log; - if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), + if (zsb->z_utf8 && u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EILSEQ); } - if (vap->va_mask & AT_XVATTR) { + if (vap->va_mask & ATTR_XVATTR) { if ((error = secpolicy_xvattr((xvattr_t *)vap, - crgetuid(cr), cr, vap->va_type)) != 0) { - ZFS_EXIT(zfsvfs); + crgetuid(cr), cr, vap->va_mode)) != 0) { + ZFS_EXIT(zsb); return (error); } } -top: - *vpp = NULL; - - if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) - vap->va_mode &= ~VSVTX; +top: + *ipp = NULL; if (*name == '\0') { /* * Null component name refers to the directory itself. */ - VN_HOLD(dvp); + igrab(dip); zp = dzp; dl = NULL; error = 0; } else { - /* possible VN_HOLD(zp) */ + /* possible igrab(zp) */ int zflg = 0; if (flag & FIGNORECASE) @@ -1172,20 +1326,11 @@ top: error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); if (error) { + if (have_acl) + zfs_acl_ids_free(&acl_ids); if (strcmp(name, "..") == 0) error = EISDIR; - ZFS_EXIT(zfsvfs); - if (aclp) - zfs_acl_free(aclp); - return (error); - } - } - if (vsecp && aclp == NULL) { - error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp); - if (error) { - ZFS_EXIT(zfsvfs); - if (dl) - zfs_dirent_unlock(dl); + ZFS_EXIT(zsb); return (error); } } @@ -1197,7 +1342,9 @@ top: * Create a new file object and update the directory * to reference it. */ - if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { + if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) { + if (have_acl) + zfs_acl_ids_free(&acl_ids); goto out; } @@ -1205,76 +1352,87 @@ top: * We only support the creation of regular files in * extended attribute directories. 
*/ - if ((dzp->z_phys->zp_flags & ZFS_XATTR) && - (vap->va_type != VREG)) { + + if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) { + if (have_acl) + zfs_acl_ids_free(&acl_ids); error = EINVAL; goto out; } - tx = dmu_tx_create(os); - dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); - if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(uid) || - IS_EPHEMERAL(gid)) { - if (zfsvfs->z_fuid_obj == 0) { - dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); - dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, - FALSE, NULL); - } else { - dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); - dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - } + if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap, + cr, vsecp, &acl_ids)) != 0) + goto out; + have_acl = B_TRUE; + + if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + zfs_acl_ids_free(&acl_ids); + error = EDQUOT; + goto out; } - dmu_tx_hold_bonus(tx, dzp->z_id); + + tx = dmu_tx_create(os); + + dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + + ZFS_SA_BASE_ATTR_SIZE); + + fuid_dirtied = zsb->z_fuid_dirty; + if (fuid_dirtied) + zfs_fuid_txhold(zsb, tx); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); - if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) { + dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); + if (!zsb->z_use_sa && + acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, - 0, SPA_MAXBLOCKSIZE); + 0, acl_ids.z_aclp->z_acl_bytes); } - error = dmu_tx_assign(tx, zfsvfs->z_assign); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { zfs_dirent_unlock(dl); - if (error == ERESTART && - zfsvfs->z_assign == TXG_NOWAIT) { + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } + zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); - if (aclp) - zfs_acl_free(aclp); + ZFS_EXIT(zsb); return (error); } - zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp); + zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); + + if (fuid_dirtied) + zfs_fuid_sync(zsb, tx); + (void) zfs_link_create(dl, zp, tx, ZNEW); txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); if (flag & FIGNORECASE) txtype |= TX_CI; zfs_log_create(zilog, tx, txtype, dzp, zp, name, - vsecp, fuidp, vap); - if (fuidp) - zfs_fuid_info_free(fuidp); + vsecp, acl_ids.z_fuidp, vap); + zfs_acl_ids_free(&acl_ids); dmu_tx_commit(tx); } else { int aflags = (flag & FAPPEND) ? V_APPEND : 0; + if (have_acl) + zfs_acl_ids_free(&acl_ids); + have_acl = B_FALSE; + /* * A directory entry already exists for this name. */ /* * Can't truncate an existing file if in exclusive mode. */ - if (excl == EXCL) { + if (excl) { error = EEXIST; goto out; } /* * Can't open a directory for writing. */ - if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { + if (S_ISDIR(ZTOI(zp)->i_mode)) { error = EISDIR; goto out; } @@ -1292,15 +1450,12 @@ top: /* * Truncate regular files if requested. */ - if ((ZTOV(zp)->v_type == VREG) && - (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { + if (S_ISREG(ZTOI(zp)->i_mode) && + (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) { /* we can't hold any locks when calling zfs_freesp() */ zfs_dirent_unlock(dl); dl = NULL; error = zfs_freesp(zp, 0, 0, mode, TRUE); - if (error == 0) { - vnevent_create(ZTOV(zp), ct); - } } } out: @@ -1310,158 +1465,152 @@ out: if (error) { if (zp) - VN_RELE(ZTOV(zp)); + iput(ZTOI(zp)); } else { - *vpp = ZTOV(zp); - /* - * If vnode is for a device return a specfs vnode instead. 
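The existing-entry handling in zfs_create() above (EEXIST for an exclusive create, EISDIR when the name is a directory, truncation via zfs_freesp() when a zero size is requested) is what backs the familiar open(2) flag combinations. A user-space sketch (the path is an assumption):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		const char *path = "/tank/fs/newfile";	/* assumed location */
		int fd;

		/* Exclusive create: fails with EEXIST if the entry already exists. */
		fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0644);
		if (fd < 0 && errno == EEXIST)
			printf("already exists\n");
		else if (fd >= 0)
			close(fd);

		/* Non-exclusive create with O_TRUNC: an existing file is truncated. */
		fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0644);
		if (fd >= 0)
			close(fd);
		return (0);
	}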
- */ - if (IS_DEVVP(*vpp)) { - struct vnode *svp; - - svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); - VN_RELE(*vpp); - if (svp == NULL) { - error = ENOSYS; - } - *vpp = svp; - } + zfs_inode_update(dzp); + zfs_inode_update(zp); + *ipp = ZTOI(zp); } - if (aclp) - zfs_acl_free(aclp); - ZFS_EXIT(zfsvfs); + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); + + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_create); /* * Remove an entry from a directory. * - * IN: dvp - vnode of directory to remove entry from. + * IN: dip - inode of directory to remove entry from. * name - name of entry to remove. * cr - credentials of caller. - * ct - caller context - * flags - case flags * * RETURN: 0 if success * error code if failure * * Timestamps: - * dvp - ctime|mtime - * vp - ctime (if nlink > 0) + * dip - ctime|mtime + * ip - ctime (if nlink > 0) */ + +uint64_t null_xattr = 0; + /*ARGSUSED*/ -static int -zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, - int flags) +int +zfs_remove(struct inode *dip, char *name, cred_t *cr) { - znode_t *zp, *dzp = VTOZ(dvp); - znode_t *xzp = NULL; - vnode_t *vp; - zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + znode_t *zp, *dzp = ITOZ(dip); + znode_t *xzp; + struct inode *ip; + zfs_sb_t *zsb = ITOZSB(dip); zilog_t *zilog; - uint64_t acl_obj, xattr_obj; + uint64_t xattr_obj; + uint64_t xattr_obj_unlinked = 0; + uint64_t obj = 0; zfs_dirlock_t *dl; dmu_tx_t *tx; - boolean_t may_delete_now, delete_now = FALSE; - boolean_t unlinked, toobig = FALSE; + boolean_t unlinked; uint64_t txtype; pathname_t *realnmp = NULL; +#ifdef HAVE_PN_UTILS pathname_t realnm; +#endif /* HAVE_PN_UTILS */ int error; int zflg = ZEXISTS; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(dzp); - zilog = zfsvfs->z_log; + zilog = zsb->z_log; +#ifdef HAVE_PN_UTILS if (flags & FIGNORECASE) { zflg |= ZCILOOK; pn_alloc(&realnm); realnmp = &realnm; } +#endif /* HAVE_PN_UTILS */ top: + xattr_obj = 0; + xzp = NULL; /* * Attempt to lock directory; fail if entry doesn't exist. */ - if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, - NULL, realnmp)) { + if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, + NULL, realnmp))) { +#ifdef HAVE_PN_UTILS if (realnmp) pn_free(realnmp); - ZFS_EXIT(zfsvfs); +#endif /* HAVE_PN_UTILS */ + ZFS_EXIT(zsb); return (error); } - vp = ZTOV(zp); + ip = ZTOI(zp); - if (error = zfs_zaccess_delete(dzp, zp, cr)) { + if ((error = zfs_zaccess_delete(dzp, zp, cr))) { goto out; } /* * Need to use rmdir for removing directories. */ - if (vp->v_type == VDIR) { + if (S_ISDIR(ip->i_mode)) { error = EPERM; goto out; } - vnevent_remove(vp, dvp, name, ct); - +#ifdef HAVE_DNLC if (realnmp) dnlc_remove(dvp, realnmp->pn_buf); else dnlc_remove(dvp, name); - - mutex_enter(&vp->v_lock); - may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); - mutex_exit(&vp->v_lock); +#endif /* HAVE_DNLC */ /* - * We may delete the znode now, or we may put it in the unlinked set; - * it depends on whether we're the last link, and on whether there are - * other holds on the vnode. So we dmu_tx_hold() the right things to - * allow for either case. + * We never delete the znode and always place it in the unlinked + * set. The dentry cache will always hold the last reference and + * is responsible for safely freeing the znode. 
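The comment above is the reason the Linux port always routes removals through the unlinked set: the inode stays alive for as long as something still references it. The classic user-space consequence, as a sketch (the path is an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		const char *path = "/tank/fs/scratch";	/* assumed location */
		char buf[5];
		int fd = open(path, O_CREAT | O_RDWR, 0644);

		if (fd < 0)
			return (1);

		(void) write(fd, "hello", 5);
		(void) unlink(path);	/* directory entry gone, znode goes on the unlinked set */

		/* The data remains accessible until the last reference is dropped. */
		(void) pread(fd, buf, 5, 0);
		printf("%.5s\n", buf);
		close(fd);		/* space is reclaimed only after this */
		return (0);
	}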
*/ - tx = dmu_tx_create(zfsvfs->z_os); + obj = zp->z_id; + tx = dmu_tx_create(zsb->z_os); dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); - dmu_tx_hold_bonus(tx, zp->z_id); - if (may_delete_now) { - toobig = - zp->z_phys->zp_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; - /* if the file is too big, only hold_free a token amount */ - dmu_tx_hold_free(tx, zp->z_id, 0, - (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END)); - } + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, zp); + zfs_sa_upgrade_txholds(tx, dzp); /* are there any extended attributes? */ - if ((xattr_obj = zp->z_phys->zp_xattr) != 0) { - /* XXX - do we need this if we are deleting? */ - dmu_tx_hold_bonus(tx, xattr_obj); + error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + &xattr_obj, sizeof (xattr_obj)); + if (error == 0 && xattr_obj) { + error = zfs_zget(zsb, xattr_obj, &xzp); + ASSERT0(error); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); + dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); } - /* are there any additional acls */ - if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 && - may_delete_now) - dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); - /* charge as an update -- would be nice not to charge at all */ - dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); + dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); - error = dmu_tx_assign(tx, zfsvfs->z_assign); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { zfs_dirent_unlock(dl); - VN_RELE(vp); - if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + iput(ip); + if (xzp) + iput(ZTOI(xzp)); + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } +#ifdef HAVE_PN_UTILS if (realnmp) pn_free(realnmp); +#endif /* HAVE_PN_UTILS */ dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } @@ -1476,270 +1625,263 @@ top: } if (unlinked) { - mutex_enter(&vp->v_lock); - delete_now = may_delete_now && !toobig && - vp->v_count == 1 && !vn_has_cached_data(vp) && - zp->z_phys->zp_xattr == xattr_obj && - zp->z_phys->zp_acl.z_acl_extern_obj == acl_obj; - mutex_exit(&vp->v_lock); - } - - if (delete_now) { - if (zp->z_phys->zp_xattr) { - error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp); - ASSERT3U(error, ==, 0); - ASSERT3U(xzp->z_phys->zp_links, ==, 2); - dmu_buf_will_dirty(xzp->z_dbuf, tx); - mutex_enter(&xzp->z_lock); - xzp->z_unlinked = 1; - xzp->z_phys->zp_links = 0; - mutex_exit(&xzp->z_lock); - zfs_unlinked_add(xzp, tx); - zp->z_phys->zp_xattr = 0; /* probably unnecessary */ - } + /* + * Hold z_lock so that we can make sure that the ACL obj + * hasn't changed. Could have been deleted due to + * zfs_sa_upgrade(). 
+ */ mutex_enter(&zp->z_lock); - mutex_enter(&vp->v_lock); - vp->v_count--; - ASSERT3U(vp->v_count, ==, 0); - mutex_exit(&vp->v_lock); + (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + &xattr_obj_unlinked, sizeof (xattr_obj_unlinked)); mutex_exit(&zp->z_lock); - zfs_znode_delete(zp, tx); - } else if (unlinked) { zfs_unlinked_add(zp, tx); } txtype = TX_REMOVE; +#ifdef HAVE_PN_UTILS if (flags & FIGNORECASE) txtype |= TX_CI; - zfs_log_remove(zilog, tx, txtype, dzp, name); +#endif /* HAVE_PN_UTILS */ + zfs_log_remove(zilog, tx, txtype, dzp, name, obj); dmu_tx_commit(tx); out: +#ifdef HAVE_PN_UTILS if (realnmp) pn_free(realnmp); +#endif /* HAVE_PN_UTILS */ zfs_dirent_unlock(dl); + zfs_inode_update(dzp); + zfs_inode_update(zp); + if (xzp) + zfs_inode_update(xzp); - if (!delete_now) { - VN_RELE(vp); - } else if (xzp) { - /* this rele is delayed to prevent nesting transactions */ - VN_RELE(ZTOV(xzp)); - } + iput(ip); + if (xzp) + iput(ZTOI(xzp)); + + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_remove); /* - * Create a new directory and insert it into dvp using the name + * Create a new directory and insert it into dip using the name * provided. Return a pointer to the inserted directory. * - * IN: dvp - vnode of directory to add subdir to. + * IN: dip - inode of directory to add subdir to. * dirname - name of new directory. * vap - attributes of new directory. * cr - credentials of caller. - * ct - caller context * vsecp - ACL to be set * - * OUT: vpp - vnode of created directory. + * OUT: ipp - inode of created directory. * * RETURN: 0 if success * error code if failure * * Timestamps: - * dvp - ctime|mtime updated - * vp - ctime|mtime|atime updated + * dip - ctime|mtime updated + * ipp - ctime|mtime|atime updated */ /*ARGSUSED*/ -static int -zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, - caller_context_t *ct, int flags, vsecattr_t *vsecp) +int +zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp, + cred_t *cr, int flags, vsecattr_t *vsecp) { - znode_t *zp, *dzp = VTOZ(dvp); - zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + znode_t *zp, *dzp = ITOZ(dip); + zfs_sb_t *zsb = ITOZSB(dip); zilog_t *zilog; zfs_dirlock_t *dl; uint64_t txtype; dmu_tx_t *tx; int error; - zfs_acl_t *aclp = NULL; - zfs_fuid_info_t *fuidp = NULL; int zf = ZNEW; - ksid_t *ksid; uid_t uid; gid_t gid = crgetgid(cr); + zfs_acl_ids_t acl_ids; + boolean_t fuid_dirtied; - ASSERT(vap->va_type == VDIR); + ASSERT(S_ISDIR(vap->va_mode)); /* * If we have an ephemeral id, ACL, or XVATTR then * make sure file system is at proper version */ - ksid = crgetsid(cr, KSID_OWNER); - if (ksid) - uid = ksid_getid(ksid); - else - uid = crgetuid(cr); - if (zfsvfs->z_use_fuids == B_FALSE && - (vsecp || (vap->va_mask & AT_XVATTR) || - IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) + uid = crgetuid(cr); + if (zsb->z_use_fuids == B_FALSE && + (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) return (EINVAL); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(dzp); - zilog = zfsvfs->z_log; + zilog = zsb->z_log; - if (dzp->z_phys->zp_flags & ZFS_XATTR) { - ZFS_EXIT(zfsvfs); + if (dzp->z_pflags & ZFS_XATTR) { + ZFS_EXIT(zsb); return (EINVAL); } - if (zfsvfs->z_utf8 && u8_validate(dirname, + if (zsb->z_utf8 && u8_validate(dirname, strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EILSEQ); } if (flags & FIGNORECASE) zf |= ZCILOOK; - if (vap->va_mask & AT_XVATTR) 
+ if (vap->va_mask & ATTR_XVATTR) { if ((error = secpolicy_xvattr((xvattr_t *)vap, - crgetuid(cr), cr, vap->va_type)) != 0) { - ZFS_EXIT(zfsvfs); + crgetuid(cr), cr, vap->va_mode)) != 0) { + ZFS_EXIT(zsb); return (error); } + } + if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, + vsecp, &acl_ids)) != 0) { + ZFS_EXIT(zsb); + return (error); + } /* * First make sure the new directory doesn't exist. + * + * Existence is checked first to make sure we don't return + * EACCES instead of EEXIST which can cause some applications + * to fail. */ top: - *vpp = NULL; + *ipp = NULL; - if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, - NULL, NULL)) { - ZFS_EXIT(zfsvfs); + if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, + NULL, NULL))) { + zfs_acl_ids_free(&acl_ids); + ZFS_EXIT(zsb); return (error); } - if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { + if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) { + zfs_acl_ids_free(&acl_ids); zfs_dirent_unlock(dl); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } - if (vsecp && aclp == NULL) { - error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp); - if (error) { - zfs_dirent_unlock(dl); - ZFS_EXIT(zfsvfs); - return (error); - } + if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + zfs_acl_ids_free(&acl_ids); + zfs_dirent_unlock(dl); + ZFS_EXIT(zsb); + return (EDQUOT); } + /* * Add a new entry to the directory. */ - tx = dmu_tx_create(zfsvfs->z_os); + tx = dmu_tx_create(zsb->z_os); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); - if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(uid) || - IS_EPHEMERAL(gid)) { - if (zfsvfs->z_fuid_obj == 0) { - dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); - dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); - } else { - dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); - dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - } + fuid_dirtied = zsb->z_fuid_dirty; + if (fuid_dirtied) + zfs_fuid_txhold(zsb, tx); + if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + acl_ids.z_aclp->z_acl_bytes); } - if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) - dmu_tx_hold_write(tx, DMU_NEW_OBJECT, - 0, SPA_MAXBLOCKSIZE); - error = dmu_tx_assign(tx, zfsvfs->z_assign); + + dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + + ZFS_SA_BASE_ATTR_SIZE); + + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { zfs_dirent_unlock(dl); - if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } + zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); - if (aclp) - zfs_acl_free(aclp); + ZFS_EXIT(zsb); return (error); } /* * Create new node. */ - zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp); + zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); - if (aclp) - zfs_acl_free(aclp); + if (fuid_dirtied) + zfs_fuid_sync(zsb, tx); /* * Now put new name in parent dir. 
*/ (void) zfs_link_create(dl, zp, tx, ZNEW); - *vpp = ZTOV(zp); + *ipp = ZTOI(zp); txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); if (flags & FIGNORECASE) txtype |= TX_CI; - zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, fuidp, vap); + zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, + acl_ids.z_fuidp, vap); + + zfs_acl_ids_free(&acl_ids); - if (fuidp) - zfs_fuid_info_free(fuidp); dmu_tx_commit(tx); zfs_dirent_unlock(dl); - ZFS_EXIT(zfsvfs); + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); + + zfs_inode_update(dzp); + zfs_inode_update(zp); + ZFS_EXIT(zsb); return (0); } +EXPORT_SYMBOL(zfs_mkdir); /* * Remove a directory subdir entry. If the current working * directory is the same as the subdir to be removed, the * remove will fail. * - * IN: dvp - vnode of directory to remove from. + * IN: dip - inode of directory to remove from. * name - name of directory to be removed. - * cwd - vnode of current working directory. + * cwd - inode of current working directory. * cr - credentials of caller. - * ct - caller context * flags - case flags * * RETURN: 0 if success * error code if failure * * Timestamps: - * dvp - ctime|mtime updated + * dip - ctime|mtime updated */ /*ARGSUSED*/ -static int -zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, - caller_context_t *ct, int flags) +int +zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr, + int flags) { - znode_t *dzp = VTOZ(dvp); + znode_t *dzp = ITOZ(dip); znode_t *zp; - vnode_t *vp; - zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + struct inode *ip; + zfs_sb_t *zsb = ITOZSB(dip); zilog_t *zilog; zfs_dirlock_t *dl; dmu_tx_t *tx; int error; int zflg = ZEXISTS; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(dzp); - zilog = zfsvfs->z_log; + zilog = zsb->z_log; if (flags & FIGNORECASE) zflg |= ZCILOOK; @@ -1749,30 +1891,28 @@ top: /* * Attempt to lock directory; fail if entry doesn't exist. */ - if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, - NULL, NULL)) { - ZFS_EXIT(zfsvfs); + if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, + NULL, NULL))) { + ZFS_EXIT(zsb); return (error); } - vp = ZTOV(zp); + ip = ZTOI(zp); - if (error = zfs_zaccess_delete(dzp, zp, cr)) { + if ((error = zfs_zaccess_delete(dzp, zp, cr))) { goto out; } - if (vp->v_type != VDIR) { + if (!S_ISDIR(ip->i_mode)) { error = ENOTDIR; goto out; } - if (vp == cwd) { + if (ip == cwd) { error = EINVAL; goto out; } - vnevent_rmdir(vp, dvp, name, ct); - /* * Grab a lock on the directory to make sure that noone is * trying to add (or lookup) entries while we are removing it. 
@@ -1785,23 +1925,25 @@ top: */ rw_enter(&zp->z_parent_lock, RW_WRITER); - tx = dmu_tx_create(zfsvfs->z_os); + tx = dmu_tx_create(zsb->z_os); dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); - dmu_tx_hold_bonus(tx, zp->z_id); - dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); - error = dmu_tx_assign(tx, zfsvfs->z_assign); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + zfs_sa_upgrade_txholds(tx, zp); + zfs_sa_upgrade_txholds(tx, dzp); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { rw_exit(&zp->z_parent_lock); rw_exit(&zp->z_name_lock); zfs_dirent_unlock(dl); - VN_RELE(vp); - if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + iput(ip); + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } @@ -1811,7 +1953,7 @@ top: uint64_t txtype = TX_RMDIR; if (flags & FIGNORECASE) txtype |= TX_CI; - zfs_log_remove(zilog, tx, txtype, dzp, name); + zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT); } dmu_tx_commit(tx); @@ -1821,32 +1963,32 @@ top: out: zfs_dirent_unlock(dl); - VN_RELE(vp); + zfs_inode_update(dzp); + zfs_inode_update(zp); + iput(ip); - ZFS_EXIT(zfsvfs); + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); + + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_rmdir); /* * Read as many directory entries as will fit into the provided - * buffer from the given directory cursor position (specified in - * the uio structure. + * dirent buffer from the given directory cursor position. * - * IN: vp - vnode of directory to read. - * uio - structure supplying read location, range info, - * and return buffer. - * cr - credentials of caller. - * ct - caller context - * flags - case flags + * IN: ip - inode of directory to read. + * dirent - buffer for directory entries. * - * OUT: uio - updated offset and range, buffer filled. - * eofp - set to true if end-of-file detected. + * OUT: dirent - filler buffer of directory entries. * * RETURN: 0 if success * error code if failure * * Timestamps: - * vp - atime updated + * ip - atime updated * * Note that the low 4 bits of the cookie returned by zap is always zero. * This allows us to use the low range for "special" directory entries: @@ -1854,63 +1996,42 @@ out: * we use the offset 2 for the '.zfs' directory. */ /* ARGSUSED */ -static int -zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, - caller_context_t *ct, int flags) +int +zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr) { - znode_t *zp = VTOZ(vp); - iovec_t *iovp; - edirent_t *eodp; - dirent64_t *odp; - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); objset_t *os; - caddr_t outbuf; - size_t bufsize; zap_cursor_t zc; zap_attribute_t zap; - uint_t bytes_wanted; - uint64_t offset; /* must be unsigned; checks for < 1 */ - int local_eof; int outcount; int error; uint8_t prefetch; - boolean_t check_sysattrs; + int done = 0; + uint64_t parent; + loff_t *pos = &(ctx->pos); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - /* - * If we are not given an eof variable, - * use a local one. - */ - if (eofp == NULL) - eofp = &local_eof; - - /* - * Check for valid iov_len. 
- */ - if (uio->uio_iov->iov_len <= 0) { - ZFS_EXIT(zfsvfs); - return (EINVAL); - } + if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb), + &parent, sizeof (parent))) != 0) + goto out; /* * Quit if directory has been removed (posix) */ - if ((*eofp = zp->z_unlinked) != 0) { - ZFS_EXIT(zfsvfs); - return (0); - } - error = 0; - os = zfsvfs->z_os; - offset = uio->uio_loffset; + if (zp->z_unlinked) + goto out; + + os = zsb->z_os; prefetch = zp->z_zn_prefetch; /* * Initialize the iterator cursor. */ - if (offset <= 3) { + if (*pos <= 3) { /* * Start iteration from the beginning of the directory. */ @@ -1919,55 +2040,28 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, /* * The offset is a serialized cursor. */ - zap_cursor_init_serialized(&zc, os, zp->z_id, offset); - } - - /* - * Get space to change directory entries into fs independent format. - */ - iovp = uio->uio_iov; - bytes_wanted = iovp->iov_len; - if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { - bufsize = bytes_wanted; - outbuf = kmem_alloc(bufsize, KM_SLEEP); - odp = (struct dirent64 *)outbuf; - } else { - bufsize = bytes_wanted; - odp = (struct dirent64 *)iovp->iov_base; + zap_cursor_init_serialized(&zc, os, zp->z_id, *pos); } - eodp = (struct edirent *)odp; - - /* - * If this VFS supports the system attribute view interface; and - * we're looking at an extended attribute directory; and we care - * about normalization conflicts on this vfs; then we must check - * for normalization conflicts with the sysattr name space. - */ - check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && - (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && - (flags & V_RDDIR_ENTFLAGS); /* * Transform to file-system independent format */ outcount = 0; - while (outcount < bytes_wanted) { - ino64_t objnum; - ushort_t reclen; - off64_t *next; + while (!done) { + uint64_t objnum; /* * Special case `.', `..', and `.zfs'. */ - if (offset == 0) { + if (*pos == 0) { (void) strcpy(zap.za_name, "."); zap.za_normalization_conflict = 0; objnum = zp->z_id; - } else if (offset == 1) { + } else if (*pos == 1) { (void) strcpy(zap.za_name, ".."); zap.za_normalization_conflict = 0; - objnum = zp->z_phys->zp_parent; - } else if (offset == 2 && zfs_show_ctldir(zp)) { + objnum = parent; + } else if (*pos == 2 && zfs_show_ctldir(zp)) { (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); zap.za_normalization_conflict = 0; objnum = ZFSCTL_INO_ROOT; @@ -1975,199 +2069,143 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, /* * Grab next entry. */ - if (error = zap_cursor_retrieve(&zc, &zap)) { - if ((*eofp = (error == ENOENT)) != 0) + if ((error = zap_cursor_retrieve(&zc, &zap))) { + if (error == ENOENT) break; else goto update; } + /* + * Allow multiple entries provided the first entry is + * the object id. Non-zpl consumers may safely make + * use of the additional space. 
+ * + * XXX: This should be a feature flag for compatibility + */ if (zap.za_integer_length != 8 || - zap.za_num_integers != 1) { + zap.za_num_integers == 0) { cmn_err(CE_WARN, "zap_readdir: bad directory " - "entry, obj = %lld, offset = %lld\n", + "entry, obj = %lld, offset = %lld, " + "length = %d, num = %lld\n", (u_longlong_t)zp->z_id, - (u_longlong_t)offset); + (u_longlong_t)*pos, + zap.za_integer_length, + (u_longlong_t)zap.za_num_integers); error = ENXIO; goto update; } objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); - /* - * MacOS X can extract the object type here such as: - * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); - */ - - if (check_sysattrs && !zap.za_normalization_conflict) { - zap.za_normalization_conflict = - xattr_sysattr_casechk(zap.za_name); - } } - if (flags & V_RDDIR_ENTFLAGS) - reclen = EDIRENT_RECLEN(strlen(zap.za_name)); - else - reclen = DIRENT64_RECLEN(strlen(zap.za_name)); - - /* - * Will this entry fit in the buffer? - */ - if (outcount + reclen > bufsize) { - /* - * Did we manage to fit anything in the buffer? - */ - if (!outcount) { - error = EINVAL; - goto update; - } + done = !dir_emit(ctx, zap.za_name, strlen(zap.za_name), + objnum, ZFS_DIRENT_TYPE(zap.za_first_integer)); + if (done) break; - } - if (flags & V_RDDIR_ENTFLAGS) { - /* - * Add extended flag entry: - */ - eodp->ed_ino = objnum; - eodp->ed_reclen = reclen; - /* NOTE: ed_off is the offset for the *next* entry */ - next = &(eodp->ed_off); - eodp->ed_eflags = zap.za_normalization_conflict ? - ED_CASE_CONFLICT : 0; - (void) strncpy(eodp->ed_name, zap.za_name, - EDIRENT_NAMELEN(reclen)); - eodp = (edirent_t *)((intptr_t)eodp + reclen); - } else { - /* - * Add normal entry: - */ - odp->d_ino = objnum; - odp->d_reclen = reclen; - /* NOTE: d_off is the offset for the *next* entry */ - next = &(odp->d_off); - (void) strncpy(odp->d_name, zap.za_name, - DIRENT64_NAMELEN(reclen)); - odp = (dirent64_t *)((intptr_t)odp + reclen); - } - outcount += reclen; - - ASSERT(outcount <= bufsize); /* Prefetch znode */ - if (prefetch) + if (prefetch) { dmu_prefetch(os, objnum, 0, 0); + } - /* - * Move to the next entry, fill in the previous offset. - */ - if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { + if (*pos > 2 || (*pos == 2 && !zfs_show_ctldir(zp))) { zap_cursor_advance(&zc); - offset = zap_cursor_serialize(&zc); + *pos = zap_cursor_serialize(&zc); } else { - offset += 1; + (*pos)++; } - *next = offset; } zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ - if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { - iovp->iov_base += outcount; - iovp->iov_len -= outcount; - uio->uio_resid -= outcount; - } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { - /* - * Reset the pointer. 
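The readdir hunks above replace the Solaris uio/dirent64 copy-out with the Linux dir_context model: entries are pushed to the VFS one at a time through dir_emit() and the cursor lives in ctx->pos, so no staging buffer or uiomove() is needed. A minimal sketch of that calling pattern; example_iterate() and example_next_entry() are illustrative stand-ins, not code from this file:

	/* Stand-in helper: fetch the entry at cursor "pos"; returns 0
	 * when the directory is exhausted.  Not part of this file. */
	static int example_next_entry(struct file *filp, loff_t pos,
	    char *name, uint64_t *ino);

	static int
	example_iterate(struct file *filp, struct dir_context *ctx)
	{
		uint64_t ino;
		char name[256];

		/* "." and ".." first; returns false if the buffer is full. */
		if (!dir_emit_dots(filp, ctx))
			return (0);

		/* One entry per pass; dir_emit() returns false once the
		 * caller's buffer fills, so stop and resume from ctx->pos
		 * on the next call. */
		while (example_next_entry(filp, ctx->pos, name, &ino)) {
			if (!dir_emit(ctx, name, strlen(name), ino, DT_UNKNOWN))
				break;
			ctx->pos++;
		}

		return (0);
	}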
- */ - offset = uio->uio_loffset; - } - update: zap_cursor_fini(&zc); - if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) - kmem_free(outbuf, bufsize); - if (error == ENOENT) error = 0; - ZFS_ACCESSTIME_STAMP(zfsvfs, zp); + ZFS_ACCESSTIME_STAMP(zsb, zp); + zfs_inode_update(zp); + +out: + ZFS_EXIT(zsb); - uio->uio_loffset = offset; - ZFS_EXIT(zfsvfs); return (error); } +EXPORT_SYMBOL(zfs_readdir); ulong_t zfs_fsync_sync_cnt = 4; -static int -zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) +int +zfs_fsync(struct inode *ip, int syncflag, cred_t *cr) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - - /* - * Regardless of whether this is required for standards conformance, - * this is the logical behavior when fsync() is called on a file with - * dirty pages. We use B_ASYNC since the ZIL transactions are already - * going to be pushed out as part of the zil_commit(). - */ - if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && - (vp->v_type == VREG) && !(IS_SWAPVP(vp))) - (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); - ZFS_ENTER(zfsvfs); - ZFS_VERIFY_ZP(zp); - zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); - ZFS_EXIT(zfsvfs); + if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) { + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); + zil_commit(zsb->z_log, zp->z_id); + ZFS_EXIT(zsb); + } return (0); } +EXPORT_SYMBOL(zfs_fsync); /* * Get the requested file attributes and place them in the provided * vattr structure. * - * IN: vp - vnode of file. + * IN: ip - inode of file. * vap - va_mask identifies requested attributes. - * If AT_XVATTR set, then optional attrs are requested + * If ATTR_XVATTR set, then optional attrs are requested * flags - ATTR_NOACLCHECK (CIFS server context) * cr - credentials of caller. - * ct - caller context * * OUT: vap - attribute values. * * RETURN: 0 (always succeeds) */ /* ARGSUSED */ -static int -zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, - caller_context_t *ct) +int +zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - znode_phys_t *pzp; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); int error = 0; uint64_t links; + uint64_t mtime[2], ctime[2]; xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ xoptattr_t *xoap = NULL; boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; + sa_bulk_attr_t bulk[2]; + int count = 0; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - pzp = zp->z_phys; - mutex_enter(&zp->z_lock); + zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); + + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + + if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) { + ZFS_EXIT(zsb); + return (error); + } /* * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES. * Also, if we are the owner don't bother, since owner should * always be allowed to read basic attributes of file. 
*/ - if (!(pzp->zp_flags & ZFS_ACL_TRIVIAL) && - (pzp->zp_uid != crgetuid(cr))) { - if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, - skipaclchk, cr)) { - mutex_exit(&zp->z_lock); - ZFS_EXIT(zfsvfs); + if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) && + (vap->va_uid != crgetuid(cr))) { + if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, + skipaclchk, cr))) { + ZFS_EXIT(zsb); return (error); } } @@ -2177,216 +2215,264 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, * than to determine whether we were asked the question. */ - vap->va_type = vp->v_type; - vap->va_mode = pzp->zp_mode & MODEMASK; - zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); - vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; + mutex_enter(&zp->z_lock); + vap->va_type = vn_mode_to_vtype(zp->z_mode); + vap->va_mode = zp->z_mode; + vap->va_fsid = ZTOI(zp)->i_sb->s_dev; vap->va_nodeid = zp->z_id; - if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) - links = pzp->zp_links + 1; + if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp)) + links = zp->z_links + 1; else - links = pzp->zp_links; - vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */ - vap->va_size = pzp->zp_size; - vap->va_rdev = vp->v_rdev; - vap->va_seq = zp->z_seq; + links = zp->z_links; + vap->va_nlink = MIN(links, ZFS_LINK_MAX); + vap->va_size = i_size_read(ip); + vap->va_rdev = ip->i_rdev; + vap->va_seq = ip->i_generation; /* * Add in any requested optional attributes and the create time. * Also set the corresponding bits in the returned attribute bitmap. */ - if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { + if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) { if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { xoap->xoa_archive = - ((pzp->zp_flags & ZFS_ARCHIVE) != 0); + ((zp->z_pflags & ZFS_ARCHIVE) != 0); XVA_SET_RTN(xvap, XAT_ARCHIVE); } if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { xoap->xoa_readonly = - ((pzp->zp_flags & ZFS_READONLY) != 0); + ((zp->z_pflags & ZFS_READONLY) != 0); XVA_SET_RTN(xvap, XAT_READONLY); } if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { xoap->xoa_system = - ((pzp->zp_flags & ZFS_SYSTEM) != 0); + ((zp->z_pflags & ZFS_SYSTEM) != 0); XVA_SET_RTN(xvap, XAT_SYSTEM); } if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { xoap->xoa_hidden = - ((pzp->zp_flags & ZFS_HIDDEN) != 0); + ((zp->z_pflags & ZFS_HIDDEN) != 0); XVA_SET_RTN(xvap, XAT_HIDDEN); } if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { xoap->xoa_nounlink = - ((pzp->zp_flags & ZFS_NOUNLINK) != 0); + ((zp->z_pflags & ZFS_NOUNLINK) != 0); XVA_SET_RTN(xvap, XAT_NOUNLINK); } if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { xoap->xoa_immutable = - ((pzp->zp_flags & ZFS_IMMUTABLE) != 0); + ((zp->z_pflags & ZFS_IMMUTABLE) != 0); XVA_SET_RTN(xvap, XAT_IMMUTABLE); } if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { xoap->xoa_appendonly = - ((pzp->zp_flags & ZFS_APPENDONLY) != 0); + ((zp->z_pflags & ZFS_APPENDONLY) != 0); XVA_SET_RTN(xvap, XAT_APPENDONLY); } if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { xoap->xoa_nodump = - ((pzp->zp_flags & ZFS_NODUMP) != 0); + ((zp->z_pflags & ZFS_NODUMP) != 0); XVA_SET_RTN(xvap, XAT_NODUMP); } if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { xoap->xoa_opaque = - ((pzp->zp_flags & ZFS_OPAQUE) != 0); + ((zp->z_pflags & ZFS_OPAQUE) != 0); XVA_SET_RTN(xvap, XAT_OPAQUE); } if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { xoap->xoa_av_quarantined = - ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0); + ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0); XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); } if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { xoap->xoa_av_modified = - ((pzp->zp_flags & 
ZFS_AV_MODIFIED) != 0); + ((zp->z_pflags & ZFS_AV_MODIFIED) != 0); XVA_SET_RTN(xvap, XAT_AV_MODIFIED); } if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && - vp->v_type == VREG && - (pzp->zp_flags & ZFS_BONUS_SCANSTAMP)) { - size_t len; - dmu_object_info_t doi; - - /* - * Only VREG files have anti-virus scanstamps, so we - * won't conflict with symlinks in the bonus buffer. - */ - dmu_object_info_from_db(zp->z_dbuf, &doi); - len = sizeof (xoap->xoa_av_scanstamp) + - sizeof (znode_phys_t); - if (len <= doi.doi_bonus_size) { - /* - * pzp points to the start of the - * znode_phys_t. pzp + 1 points to the - * first byte after the znode_phys_t. - */ - (void) memcpy(xoap->xoa_av_scanstamp, - pzp + 1, - sizeof (xoap->xoa_av_scanstamp)); - XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP); - } + S_ISREG(ip->i_mode)) { + zfs_sa_get_scanstamp(zp, xvap); } if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { - ZFS_TIME_DECODE(&xoap->xoa_createtime, pzp->zp_crtime); + uint64_t times[2]; + + (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb), + times, sizeof (times)); + ZFS_TIME_DECODE(&xoap->xoa_createtime, times); XVA_SET_RTN(xvap, XAT_CREATETIME); } + + if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { + xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0); + XVA_SET_RTN(xvap, XAT_REPARSE); + } + if (XVA_ISSET_REQ(xvap, XAT_GEN)) { + xoap->xoa_generation = zp->z_gen; + XVA_SET_RTN(xvap, XAT_GEN); + } + + if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) { + xoap->xoa_offline = + ((zp->z_pflags & ZFS_OFFLINE) != 0); + XVA_SET_RTN(xvap, XAT_OFFLINE); + } + + if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) { + xoap->xoa_sparse = + ((zp->z_pflags & ZFS_SPARSE) != 0); + XVA_SET_RTN(xvap, XAT_SPARSE); + } } - ZFS_TIME_DECODE(&vap->va_atime, pzp->zp_atime); - ZFS_TIME_DECODE(&vap->va_mtime, pzp->zp_mtime); - ZFS_TIME_DECODE(&vap->va_ctime, pzp->zp_ctime); + ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime); + ZFS_TIME_DECODE(&vap->va_mtime, mtime); + ZFS_TIME_DECODE(&vap->va_ctime, ctime); mutex_exit(&zp->z_lock); - dmu_object_size_from_db(zp->z_dbuf, &vap->va_blksize, &vap->va_nblocks); + sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks); if (zp->z_blksz == 0) { /* * Block size hasn't been set; suggest maximal I/O transfers. */ - vap->va_blksize = zfsvfs->z_max_blksz; + vap->va_blksize = zsb->z_max_blksz; + } + + ZFS_EXIT(zsb); + return (0); +} +EXPORT_SYMBOL(zfs_getattr); + +/* + * Get the basic file attributes and place them in the provided kstat + * structure. The inode is assumed to be the authoritative source + * for most of the attributes. However, the znode currently has the + * authoritative atime, blksize, and block count. + * + * IN: ip - inode of file. + * + * OUT: sp - kstat values. + * + * RETURN: 0 (always succeeds) + */ +/* ARGSUSED */ +int +zfs_getattr_fast(struct inode *ip, struct kstat *sp) +{ + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); + + mutex_enter(&zp->z_lock); + + generic_fillattr(ip, sp); + ZFS_TIME_DECODE(&sp->atime, zp->z_atime); + + sa_object_size(zp->z_sa_hdl, (uint32_t *)&sp->blksize, &sp->blocks); + if (unlikely(zp->z_blksz == 0)) { + /* + * Block size hasn't been set; suggest maximal I/O transfers. + */ + sp->blksize = zsb->z_max_blksz; } - ZFS_EXIT(zfsvfs); + mutex_exit(&zp->z_lock); + + ZFS_EXIT(zsb); + return (0); } +EXPORT_SYMBOL(zfs_getattr_fast); /* * Set the file attributes to the values contained in the * vattr structure. * - * IN: vp - vnode of file to be modified. + * IN: ip - inode of file to be modified. * vap - new attribute values. 
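zfs_getattr_fast(), added above, exists to back the Linux ->getattr inode operation: generic_fillattr() copies what the inode already carries and the znode supplies the authoritative atime, block size and block count. A minimal sketch of a caller, assuming the pre-4.11 getattr signature; zpl_getattr here is shorthand for whatever thin wrapper the Linux-facing layer provides, not code from this file:

	static int
	zpl_getattr(struct vfsmount *mnt, struct dentry *dentry,
	    struct kstat *stat)
	{
		int error;

		/* zfs_* entry points return positive errno values; the
		 * Linux VFS expects them negated. */
		error = -zfs_getattr_fast(dentry->d_inode, stat);

		return (error);
	}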
- * If AT_XVATTR set, then optional attrs are being set + * If ATTR_XVATTR set, then optional attrs are being set * flags - ATTR_UTIME set if non-default time values provided. * - ATTR_NOACLCHECK (CIFS context only). * cr - credentials of caller. - * ct - caller context * * RETURN: 0 if success * error code if failure * * Timestamps: - * vp - ctime updated, mtime updated if size changed. + * ip - ctime updated, mtime updated if size changed. */ /* ARGSUSED */ -static int -zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, - caller_context_t *ct) +int +zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) { - znode_t *zp = VTOZ(vp); - znode_phys_t *pzp; - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); zilog_t *zilog; dmu_tx_t *tx; vattr_t oldva; + xvattr_t *tmpxvattr; uint_t mask = vap->va_mask; uint_t saved_mask; int trim_mask = 0; uint64_t new_mode; + uint64_t new_uid, new_gid; + uint64_t xattr_obj; + uint64_t mtime[2], ctime[2]; znode_t *attrzp; int need_policy = FALSE; - int err; + int err, err2; zfs_fuid_info_t *fuidp = NULL; xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ xoptattr_t *xoap; - zfs_acl_t *aclp = NULL; + zfs_acl_t *aclp; boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; + boolean_t fuid_dirtied = B_FALSE; + sa_bulk_attr_t *bulk, *xattr_bulk; + int count = 0, xattr_count = 0; if (mask == 0) return (0); - if (mask & AT_NOSET) - return (EINVAL); - - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - pzp = zp->z_phys; - zilog = zfsvfs->z_log; + zilog = zsb->z_log; /* * Make sure that if we have ephemeral uid/gid or xvattr specified * that file system is at proper version level */ - if (zfsvfs->z_use_fuids == B_FALSE && - (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || - ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || - (mask & AT_XVATTR))) { - ZFS_EXIT(zfsvfs); + if (zsb->z_use_fuids == B_FALSE && + (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) || + ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) || + (mask & ATTR_XVATTR))) { + ZFS_EXIT(zsb); return (EINVAL); } - if (mask & AT_SIZE && vp->v_type == VDIR) { - ZFS_EXIT(zfsvfs); + if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) { + ZFS_EXIT(zsb); return (EISDIR); } - if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { - ZFS_EXIT(zfsvfs); + if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) { + ZFS_EXIT(zsb); return (EINVAL); } @@ -2396,19 +2482,25 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, */ xoap = xva_getxoptattr(xvap); + tmpxvattr = kmem_alloc(sizeof(xvattr_t), KM_SLEEP); + xva_init(tmpxvattr); + + bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP); + xattr_bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP); + /* * Immutable files can only alter immutable bit and atime */ - if ((pzp->zp_flags & ZFS_IMMUTABLE) && - ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || - ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { - ZFS_EXIT(zfsvfs); - return (EPERM); + if ((zp->z_pflags & ZFS_IMMUTABLE) && + ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) || + ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { + err = EPERM; + goto out3; } - if ((mask & AT_SIZE) && (pzp->zp_flags & ZFS_READONLY)) { - ZFS_EXIT(zfsvfs); - return (EPERM); + if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) { + err = EPERM; + goto out3; } /* @@ -2417,32 +2509,35 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, * 
handle times greater than 2039. This check should be removed * once large timestamps are fully supported. */ - if (mask & (AT_ATIME | AT_MTIME)) { - if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || - ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { - ZFS_EXIT(zfsvfs); - return (EOVERFLOW); + if (mask & (ATTR_ATIME | ATTR_MTIME)) { + if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || + ((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { + err = EOVERFLOW; + goto out3; } } top: attrzp = NULL; + aclp = NULL; - if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { - ZFS_EXIT(zfsvfs); - return (EROFS); + /* Can this be moved to before the top label? */ + if (zfs_is_readonly(zsb)) { + err = EROFS; + goto out3; } /* * First validate permissions */ - if (mask & AT_SIZE) { + if (mask & ATTR_SIZE) { err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr); - if (err) { - ZFS_EXIT(zfsvfs); - return (err); - } + if (err) + goto out3; + + truncate_setsize(ip, vap->va_size); + /* * XXX - Note, we are not providing any open * mode flags here (like FNDELAY), so we may @@ -2451,23 +2546,24 @@ top: */ /* XXX - would it be OK to generate a log record here? */ err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE); - if (err) { - ZFS_EXIT(zfsvfs); - return (err); - } + if (err) + goto out3; } - if (mask & (AT_ATIME|AT_MTIME) || - ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || + if (mask & (ATTR_ATIME|ATTR_MTIME) || + ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || XVA_ISSET_REQ(xvap, XAT_READONLY) || XVA_ISSET_REQ(xvap, XAT_ARCHIVE) || + XVA_ISSET_REQ(xvap, XAT_OFFLINE) || + XVA_ISSET_REQ(xvap, XAT_SPARSE) || XVA_ISSET_REQ(xvap, XAT_CREATETIME) || - XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) + XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) { need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0, skipaclchk, cr); + } - if (mask & (AT_UID|AT_GID)) { - int idmask = (mask & (AT_UID|AT_GID)); + if (mask & (ATTR_UID|ATTR_GID)) { + int idmask = (mask & (ATTR_UID|ATTR_GID)); int take_owner; int take_group; @@ -2476,19 +2572,19 @@ top: * we may clear S_ISUID/S_ISGID bits. */ - if (!(mask & AT_MODE)) - vap->va_mode = pzp->zp_mode; + if (!(mask & ATTR_MODE)) + vap->va_mode = zp->z_mode; /* * Take ownership or chgrp to group we are a member of */ - take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr)); - take_group = (mask & AT_GID) && - zfs_groupmember(zfsvfs, vap->va_gid, cr); + take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr)); + take_group = (mask & ATTR_GID) && + zfs_groupmember(zsb, vap->va_gid, cr); /* - * If both AT_UID and AT_GID are set then take_owner and + * If both ATTR_UID and ATTR_GID are set then take_owner and * take_group must both be set in order to allow taking * ownership. 
* @@ -2496,16 +2592,17 @@ top: * */ - if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) || - ((idmask == AT_UID) && take_owner) || - ((idmask == AT_GID) && take_group)) { + if (((idmask == (ATTR_UID|ATTR_GID)) && + take_owner && take_group) || + ((idmask == ATTR_UID) && take_owner) || + ((idmask == ATTR_GID) && take_group)) { if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0, skipaclchk, cr) == 0) { /* * Remove setuid/setgid for non-privileged users */ - secpolicy_setid_clear(vap, cr); - trim_mask = (mask & (AT_UID|AT_GID)); + (void) secpolicy_setid_clear(vap, cr); + trim_mask = (mask & (ATTR_UID|ATTR_GID)); } else { need_policy = TRUE; } @@ -2515,46 +2612,101 @@ top: } mutex_enter(&zp->z_lock); - oldva.va_mode = pzp->zp_mode; + oldva.va_mode = zp->z_mode; zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid); - if (mask & AT_XVATTR) { - if ((need_policy == FALSE) && - (XVA_ISSET_REQ(xvap, XAT_APPENDONLY) && - xoap->xoa_appendonly != - ((pzp->zp_flags & ZFS_APPENDONLY) != 0)) || - (XVA_ISSET_REQ(xvap, XAT_NOUNLINK) && - xoap->xoa_nounlink != - ((pzp->zp_flags & ZFS_NOUNLINK) != 0)) || - (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE) && - xoap->xoa_immutable != - ((pzp->zp_flags & ZFS_IMMUTABLE) != 0)) || - (XVA_ISSET_REQ(xvap, XAT_NODUMP) && - xoap->xoa_nodump != - ((pzp->zp_flags & ZFS_NODUMP) != 0)) || - (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED) && - xoap->xoa_av_modified != - ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0)) || - ((XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED) && - ((vp->v_type != VREG && xoap->xoa_av_quarantined) || - xoap->xoa_av_quarantined != - ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0)))) || - (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) || - (XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { + if (mask & ATTR_XVATTR) { + /* + * Update xvattr mask to include only those attributes + * that are actually changing. + * + * the bits will be restored prior to actually setting + * the attributes so the caller thinks they were set. 
+ */ + if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { + if (xoap->xoa_appendonly != + ((zp->z_pflags & ZFS_APPENDONLY) != 0)) { + need_policy = TRUE; + } else { + XVA_CLR_REQ(xvap, XAT_APPENDONLY); + XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { + if (xoap->xoa_nounlink != + ((zp->z_pflags & ZFS_NOUNLINK) != 0)) { + need_policy = TRUE; + } else { + XVA_CLR_REQ(xvap, XAT_NOUNLINK); + XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { + if (xoap->xoa_immutable != + ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) { + need_policy = TRUE; + } else { + XVA_CLR_REQ(xvap, XAT_IMMUTABLE); + XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { + if (xoap->xoa_nodump != + ((zp->z_pflags & ZFS_NODUMP) != 0)) { + need_policy = TRUE; + } else { + XVA_CLR_REQ(xvap, XAT_NODUMP); + XVA_SET_REQ(tmpxvattr, XAT_NODUMP); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { + if (xoap->xoa_av_modified != + ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) { + need_policy = TRUE; + } else { + XVA_CLR_REQ(xvap, XAT_AV_MODIFIED); + XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { + if ((!S_ISREG(ip->i_mode) && + xoap->xoa_av_quarantined) || + xoap->xoa_av_quarantined != + ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) { + need_policy = TRUE; + } else { + XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED); + XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED); + } + } + + if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { + mutex_exit(&zp->z_lock); + err = EPERM; + goto out3; + } + + if (need_policy == FALSE && + (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) || + XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { need_policy = TRUE; } } mutex_exit(&zp->z_lock); - if (mask & AT_MODE) { + if (mask & ATTR_MODE) { if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) { - err = secpolicy_setid_setsticky_clear(vp, vap, + err = secpolicy_setid_setsticky_clear(ip, vap, &oldva, cr); - if (err) { - ZFS_EXIT(zfsvfs); - return (err); - } - trim_mask |= AT_MODE; + if (err) + goto out3; + + trim_mask |= ATTR_MODE; } else { need_policy = TRUE; } @@ -2573,12 +2725,10 @@ top: saved_mask = vap->va_mask; vap->va_mask &= ~trim_mask; } - err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, + err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags, (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp); - if (err) { - ZFS_EXIT(zfsvfs); - return (err); - } + if (err) + goto out3; if (trim_mask) vap->va_mask |= saved_mask; @@ -2590,87 +2740,97 @@ top: */ mask = vap->va_mask; - tx = dmu_tx_create(zfsvfs->z_os); - dmu_tx_hold_bonus(tx, zp->z_id); - if (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || - ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid))) { - if (zfsvfs->z_fuid_obj == 0) { - dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); - dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); - } else { - dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); - dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); + if ((mask & (ATTR_UID | ATTR_GID))) { + err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), + &xattr_obj, sizeof (xattr_obj)); + + if (err == 0 && xattr_obj) { + err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp); + if (err) + goto out2; + } + if (mask & ATTR_UID) { + new_uid = zfs_fuid_create(zsb, + (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp); + if (new_uid != zp->z_uid && + zfs_fuid_overquota(zsb, B_FALSE, new_uid)) { + if (attrzp) + iput(ZTOI(attrzp)); + 
err = EDQUOT; + goto out2; + } } - } - if (mask & AT_MODE) { - uint64_t pmode = pzp->zp_mode; + if (mask & ATTR_GID) { + new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid, + cr, ZFS_GROUP, &fuidp); + if (new_gid != zp->z_gid && + zfs_fuid_overquota(zsb, B_TRUE, new_gid)) { + if (attrzp) + iput(ZTOI(attrzp)); + err = EDQUOT; + goto out2; + } + } + } + tx = dmu_tx_create(zsb->z_os); + if (mask & ATTR_MODE) { + uint64_t pmode = zp->z_mode; + uint64_t acl_obj; new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT); - if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) { - dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); - return (err); - } - if (pzp->zp_acl.z_acl_extern_obj) { - /* Are we upgrading ACL from old V0 format to new V1 */ - if (zfsvfs->z_version <= ZPL_VERSION_FUID && - pzp->zp_acl.z_acl_version == + zfs_acl_chmod_setattr(zp, &aclp, new_mode); + + mutex_enter(&zp->z_lock); + if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) { + /* + * Are we upgrading ACL from old V0 format + * to V1 format? + */ + if (zsb->z_version >= ZPL_VERSION_FUID && + zfs_znode_acl_version(zp) == ZFS_ACL_VERSION_INITIAL) { - dmu_tx_hold_free(tx, - pzp->zp_acl.z_acl_extern_obj, 0, + dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); } else { - dmu_tx_hold_write(tx, - pzp->zp_acl.z_acl_extern_obj, 0, + dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes); } - } else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { + } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) { dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes); } + mutex_exit(&zp->z_lock); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); + } else { + if ((mask & ATTR_XVATTR) && + XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); + else + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); } - if ((mask & (AT_UID | AT_GID)) && pzp->zp_xattr != 0) { - err = zfs_zget(zp->z_zfsvfs, pzp->zp_xattr, &attrzp); - if (err) { - dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); - if (aclp) - zfs_acl_free(aclp); - return (err); - } - dmu_tx_hold_bonus(tx, attrzp->z_id); + if (attrzp) { + dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE); } - err = dmu_tx_assign(tx, zfsvfs->z_assign); - if (err) { - if (attrzp) - VN_RELE(ZTOV(attrzp)); + fuid_dirtied = zsb->z_fuid_dirty; + if (fuid_dirtied) + zfs_fuid_txhold(zsb, tx); - if (aclp) { - zfs_acl_free(aclp); - aclp = NULL; - } + zfs_sa_upgrade_txholds(tx, zp); - if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + err = dmu_tx_assign(tx, TXG_NOWAIT); + if (err) { + if (err == ERESTART) dmu_tx_wait(tx); - dmu_tx_abort(tx); - goto top; - } - dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); - return (err); + goto out; } - dmu_buf_will_dirty(zp->z_dbuf, tx); - + count = 0; /* * Set each attribute requested. * We group settings according to the locks they need to acquire. @@ -2679,90 +2839,199 @@ top: * updated as a side-effect of calling this function. 
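The comment above marks the largest structural change in this function: instead of poking fields in znode_phys_t under a single mutex, the rewritten body accumulates every modified attribute into sa_bulk_attr_t arrays (bulk for the file itself, xattr_bulk for its xattr directory) and flushes each array with one sa_bulk_update() call once the locks are held. Reduced to a single attribute, the shape of that idiom is (identifiers as used in the surrounding hunks):

	sa_bulk_attr_t bulk[7];
	int count = 0, err;

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
	    &new_mode, sizeof (new_mode));
	/* ...UID, GID, ATIME, MTIME, CTIME, FLAGS added the same way... */

	err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);	/* one write */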
*/ - mutex_enter(&zp->z_lock); - if (mask & AT_MODE) { + if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE)) mutex_enter(&zp->z_acl_lock); - zp->z_phys->zp_mode = new_mode; - err = zfs_aclset_common(zp, aclp, cr, &fuidp, tx); - ASSERT3U(err, ==, 0); - mutex_exit(&zp->z_acl_lock); - } + mutex_enter(&zp->z_lock); - if (attrzp) + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL, + &zp->z_pflags, sizeof (zp->z_pflags)); + + if (attrzp) { + if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE)) + mutex_enter(&attrzp->z_acl_lock); mutex_enter(&attrzp->z_lock); + SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, + SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags, + sizeof (attrzp->z_pflags)); + } + + if (mask & (ATTR_UID|ATTR_GID)) { + + if (mask & ATTR_UID) { + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, + &new_uid, sizeof (new_uid)); + zp->z_uid = new_uid; + if (attrzp) { + SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, + SA_ZPL_UID(zsb), NULL, &new_uid, + sizeof (new_uid)); + attrzp->z_uid = new_uid; + } + } - if (mask & AT_UID) { - pzp->zp_uid = zfs_fuid_create(zfsvfs, - vap->va_uid, cr, ZFS_OWNER, tx, &fuidp); + if (mask & ATTR_GID) { + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), + NULL, &new_gid, sizeof (new_gid)); + zp->z_gid = new_gid; + if (attrzp) { + SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, + SA_ZPL_GID(zsb), NULL, &new_gid, + sizeof (new_gid)); + attrzp->z_gid = new_gid; + } + } + if (!(mask & ATTR_MODE)) { + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), + NULL, &new_mode, sizeof (new_mode)); + new_mode = zp->z_mode; + } + err = zfs_acl_chown_setattr(zp); + ASSERT(err == 0); if (attrzp) { - attrzp->z_phys->zp_uid = zfs_fuid_create(zfsvfs, - vap->va_uid, cr, ZFS_OWNER, tx, &fuidp); + err = zfs_acl_chown_setattr(attrzp); + ASSERT(err == 0); } } - if (mask & AT_GID) { - pzp->zp_gid = zfs_fuid_create(zfsvfs, vap->va_gid, - cr, ZFS_GROUP, tx, &fuidp); - if (attrzp) - attrzp->z_phys->zp_gid = zfs_fuid_create(zfsvfs, - vap->va_gid, cr, ZFS_GROUP, tx, &fuidp); + if (mask & ATTR_MODE) { + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, + &new_mode, sizeof (new_mode)); + zp->z_mode = new_mode; + ASSERT3P(aclp, !=, NULL); + err = zfs_aclset_common(zp, aclp, cr, tx); + ASSERT0(err); + if (zp->z_acl_cached) + zfs_acl_free(zp->z_acl_cached); + zp->z_acl_cached = aclp; + aclp = NULL; } - if (aclp) - zfs_acl_free(aclp); - - if (attrzp) - mutex_exit(&attrzp->z_lock); - if (mask & AT_ATIME) - ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime); + if (mask & ATTR_ATIME) { + ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL, + &zp->z_atime, sizeof (zp->z_atime)); + } - if (mask & AT_MTIME) - ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime); + if (mask & ATTR_MTIME) { + ZFS_TIME_ENCODE(&vap->va_mtime, mtime); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, + mtime, sizeof (mtime)); + } /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? 
*/ - if (mask & AT_SIZE) - zfs_time_stamper_locked(zp, CONTENT_MODIFIED, tx); - else if (mask != 0) - zfs_time_stamper_locked(zp, STATE_CHANGED, tx); + if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) { + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), + NULL, mtime, sizeof (mtime)); + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + &ctime, sizeof (ctime)); + zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, + B_TRUE); + } else if (mask != 0) { + SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, + &ctime, sizeof (ctime)); + zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime, + B_TRUE); + if (attrzp) { + SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, + SA_ZPL_CTIME(zsb), NULL, + &ctime, sizeof (ctime)); + zfs_tstamp_update_setup(attrzp, STATE_CHANGED, + mtime, ctime, B_TRUE); + } + } /* * Do this after setting timestamps to prevent timestamp * update from toggling bit */ - if (xoap && (mask & AT_XVATTR)) { - if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) { - size_t len; - dmu_object_info_t doi; + if (xoap && (mask & ATTR_XVATTR)) { - ASSERT(vp->v_type == VREG); + /* + * restore trimmed off masks + * so that return masks can be set for caller. + */ - /* Grow the bonus buffer if necessary. */ - dmu_object_info_from_db(zp->z_dbuf, &doi); - len = sizeof (xoap->xoa_av_scanstamp) + - sizeof (znode_phys_t); - if (len > doi.doi_bonus_size) - VERIFY(dmu_set_bonus(zp->z_dbuf, len, tx) == 0); + if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) { + XVA_SET_REQ(xvap, XAT_APPENDONLY); + } + if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) { + XVA_SET_REQ(xvap, XAT_NOUNLINK); } - zfs_xvattr_set(zp, xvap); + if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) { + XVA_SET_REQ(xvap, XAT_IMMUTABLE); + } + if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) { + XVA_SET_REQ(xvap, XAT_NODUMP); + } + if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) { + XVA_SET_REQ(xvap, XAT_AV_MODIFIED); + } + if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) { + XVA_SET_REQ(xvap, XAT_AV_QUARANTINED); + } + + if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) + ASSERT(S_ISREG(ip->i_mode)); + + zfs_xvattr_set(zp, xvap, tx); } + if (fuid_dirtied) + zfs_fuid_sync(zsb, tx); + if (mask != 0) zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); - if (fuidp) - zfs_fuid_info_free(fuidp); mutex_exit(&zp->z_lock); + if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE)) + mutex_exit(&zp->z_acl_lock); + + if (attrzp) { + if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE)) + mutex_exit(&attrzp->z_acl_lock); + mutex_exit(&attrzp->z_lock); + } +out: + if (err == 0 && attrzp) { + err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk, + xattr_count, tx); + ASSERT(err2 == 0); + } if (attrzp) - VN_RELE(ZTOV(attrzp)); + iput(ZTOI(attrzp)); + if (aclp) + zfs_acl_free(aclp); - dmu_tx_commit(tx); + if (fuidp) { + zfs_fuid_info_free(fuidp); + fuidp = NULL; + } - ZFS_EXIT(zfsvfs); + if (err) { + dmu_tx_abort(tx); + if (err == ERESTART) + goto top; + } else { + err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); + dmu_tx_commit(tx); + zfs_inode_update(zp); + } + +out2: + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); + +out3: + kmem_free(xattr_bulk, sizeof(sa_bulk_attr_t) * 7); + kmem_free(bulk, sizeof(sa_bulk_attr_t) * 7); + kmem_free(tmpxvattr, sizeof(xvattr_t)); + ZFS_EXIT(zsb); return (err); } +EXPORT_SYMBOL(zfs_setattr); typedef struct zfs_zlock { krwlock_t *zl_rwlock; /* lock we acquired */ @@ -2780,7 +3049,7 @@ zfs_rename_unlock(zfs_zlock_t **zlpp) while ((zl = *zlpp) != NULL) { if (zl->zl_znode != NULL) - VN_RELE(ZTOV(zl->zl_znode)); + 
iput(ZTOI(zl->zl_znode)); rw_exit(zl->zl_rwlock); *zlpp = zl->zl_next; kmem_free(zl, sizeof (*zl)); @@ -2798,8 +3067,8 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) { zfs_zlock_t *zl; znode_t *zp = tdzp; - uint64_t rootid = zp->z_zfsvfs->z_root; - uint64_t *oidp = &zp->z_id; + uint64_t rootid = ZTOZSB(zp)->z_root; + uint64_t oidp = zp->z_id; krwlock_t *rwlp = &szp->z_parent_lock; krw_t rw = RW_WRITER; @@ -2821,7 +3090,7 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) zfs_rename_unlock(&zl); *zlpp = NULL; zp = tdzp; - oidp = &zp->z_id; + oidp = zp->z_id; rwlp = &szp->z_parent_lock; rw = RW_WRITER; continue; @@ -2839,19 +3108,20 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) zl->zl_next = *zlpp; *zlpp = zl; - if (*oidp == szp->z_id) /* We're a descendant of szp */ + if (oidp == szp->z_id) /* We're a descendant of szp */ return (EINVAL); - if (*oidp == rootid) /* We've hit the top */ + if (oidp == rootid) /* We've hit the top */ return (0); if (rw == RW_READER) { /* i.e. not the first pass */ - int error = zfs_zget(zp->z_zfsvfs, *oidp, &zp); + int error = zfs_zget(ZTOZSB(zp), oidp, &zp); if (error) return (error); zl->zl_znode = zp; } - oidp = &zp->z_phys->zp_parent; + (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)), + &oidp, sizeof (oidp)); rwlp = &zp->z_parent_lock; rw = RW_READER; @@ -2864,30 +3134,28 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) * Move an entry from the provided source directory to the target * directory. Change the entry name as indicated. * - * IN: sdvp - Source directory containing the "old entry". + * IN: sdip - Source directory containing the "old entry". * snm - Old entry name. - * tdvp - Target directory to contain the "new entry". + * tdip - Target directory to contain the "new entry". * tnm - New entry name. * cr - credentials of caller. - * ct - caller context * flags - case flags * * RETURN: 0 if success * error code if failure * * Timestamps: - * sdvp,tdvp - ctime|mtime updated + * sdip,tdip - ctime|mtime updated */ /*ARGSUSED*/ -static int -zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, - caller_context_t *ct, int flags) +int +zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm, + cred_t *cr, int flags) { znode_t *tdzp, *szp, *tzp; - znode_t *sdzp = VTOZ(sdvp); - zfsvfs_t *zfsvfs = sdzp->z_zfsvfs; + znode_t *sdzp = ITOZ(sdip); + zfs_sb_t *zsb = ITOZSB(sdip); zilog_t *zilog; - vnode_t *realvp; zfs_dirlock_t *sdl, *tdl; dmu_tx_t *tx; zfs_zlock_t *zl; @@ -2895,26 +3163,20 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, int error = 0; int zflg = 0; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(sdzp); - zilog = zfsvfs->z_log; + zilog = zsb->z_log; - /* - * Make sure we have the real vp for the target directory. - */ - if (VOP_REALVP(tdvp, &realvp, ct) == 0) - tdvp = realvp; - - if (tdvp->v_vfsp != sdvp->v_vfsp) { - ZFS_EXIT(zfsvfs); + if (tdip->i_sb != sdip->i_sb) { + ZFS_EXIT(zsb); return (EXDEV); } - tdzp = VTOZ(tdvp); + tdzp = ITOZ(tdip); ZFS_VERIFY_ZP(tdzp); - if (zfsvfs->z_utf8 && u8_validate(tnm, + if (zsb->z_utf8 && u8_validate(tnm, strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EILSEQ); } @@ -2931,9 +3193,8 @@ top: * by renaming a linked file into/outof an attribute directory. * See the comment in zfs_link() for why this is considered bad. 
*/ - if ((tdzp->z_phys->zp_flags & ZFS_XATTR) != - (sdzp->z_phys->zp_flags & ZFS_XATTR)) { - ZFS_EXIT(zfsvfs); + if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) { + ZFS_EXIT(zsb); return (EINVAL); } @@ -2952,10 +3213,10 @@ top: * First compare the two name arguments without * considering any case folding. */ - int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); + int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER); cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); - ASSERT(error == 0 || !zfsvfs->z_utf8); + ASSERT(error == 0 || !zsb->z_utf8); if (cmp == 0) { /* * POSIX: "If the old argument and the new argument @@ -2963,7 +3224,7 @@ top: * the rename() function shall return successfully * and perform no other action." */ - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (0); } /* @@ -2984,10 +3245,10 @@ top: * is an exact match, we will allow this to proceed as * a name-change request. */ - if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || - (zfsvfs->z_case == ZFS_CASE_MIXED && + if ((zsb->z_case == ZFS_CASE_INSENSITIVE || + (zsb->z_case == ZFS_CASE_MIXED && flags & FIGNORECASE)) && - u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, + u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST, &error) == 0) { /* * case preserving rename request, require exact @@ -2998,6 +3259,15 @@ top: } } + /* + * If the source and destination directories are the same, we should + * grab the z_name_lock of that directory only once. + */ + if (sdzp == tdzp) { + zflg |= ZHAVELOCK; + rw_enter(&sdzp->z_name_lock, RW_READER); + } + if (cmp < 0) { serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, ZEXISTS | zflg, NULL, NULL); @@ -3018,19 +3288,27 @@ top: if (!terr) { zfs_dirent_unlock(tdl); if (tzp) - VN_RELE(ZTOV(tzp)); + iput(ZTOI(tzp)); } + + if (sdzp == tdzp) + rw_exit(&sdzp->z_name_lock); + if (strcmp(snm, "..") == 0) serr = EINVAL; - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (serr); } if (terr) { zfs_dirent_unlock(sdl); - VN_RELE(ZTOV(szp)); + iput(ZTOI(szp)); + + if (sdzp == tdzp) + rw_exit(&sdzp->z_name_lock); + if (strcmp(tnm, "..") == 0) terr = EINVAL; - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (terr); } @@ -3041,15 +3319,15 @@ top: * done in a single check. */ - if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)) + if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))) goto out; - if (ZTOV(szp)->v_type == VDIR) { + if (S_ISDIR(ZTOI(szp)->i_mode)) { /* * Check to make sure rename is valid. * Can't do a move like this: /usr/a/b to /usr/a/b/c/d */ - if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl)) + if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl))) goto out; } @@ -3060,13 +3338,13 @@ top: /* * Source and target must be the same type. */ - if (ZTOV(szp)->v_type == VDIR) { - if (ZTOV(tzp)->v_type != VDIR) { + if (S_ISDIR(ZTOI(szp)->i_mode)) { + if (!S_ISDIR(ZTOI(tzp)->i_mode)) { error = ENOTDIR; goto out; } } else { - if (ZTOV(tzp)->v_type == VDIR) { + if (S_ISDIR(ZTOI(tzp)->i_mode)) { error = EISDIR; goto out; } @@ -3082,44 +3360,42 @@ top: } } - vnevent_rename_src(ZTOV(szp), sdvp, snm, ct); - if (tzp) - vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct); - - /* - * notify the target directory if it is not the same - * as source directory. 
- */ - if (tdvp != sdvp) { - vnevent_rename_dest_dir(tdvp, ct); - } - - tx = dmu_tx_create(zfsvfs->z_os); - dmu_tx_hold_bonus(tx, szp->z_id); /* nlink changes */ - dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */ + tx = dmu_tx_create(zsb->z_os); + dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); + dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm); - if (sdzp != tdzp) - dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */ - if (tzp) - dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */ - dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); - error = dmu_tx_assign(tx, zfsvfs->z_assign); + if (sdzp != tdzp) { + dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, tdzp); + } + if (tzp) { + dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, tzp); + } + + zfs_sa_upgrade_txholds(tx, szp); + dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { if (zl != NULL) zfs_rename_unlock(&zl); zfs_dirent_unlock(sdl); zfs_dirent_unlock(tdl); - VN_RELE(ZTOV(szp)); + + if (sdzp == tdzp) + rw_exit(&sdzp->z_name_lock); + + iput(ZTOI(szp)); if (tzp) - VN_RELE(ZTOV(tzp)); - if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + iput(ZTOI(tzp)); + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } @@ -3129,17 +3405,33 @@ top: if (error == 0) { error = zfs_link_create(tdl, szp, tx, ZRENAMING); if (error == 0) { - szp->z_phys->zp_flags |= ZFS_AV_MODIFIED; + szp->z_pflags |= ZFS_AV_MODIFIED; - error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); - ASSERT(error == 0); + error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb), + (void *)&szp->z_pflags, sizeof (uint64_t), tx); + ASSERT0(error); - zfs_log_rename(zilog, tx, - TX_RENAME | (flags & FIGNORECASE ? TX_CI : 0), - sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp); - - /* Update path information for the target vnode */ - vn_renamepath(tdvp, ZTOV(szp), tnm, strlen(tnm)); + error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); + if (error == 0) { + zfs_log_rename(zilog, tx, TX_RENAME | + (flags & FIGNORECASE ? TX_CI : 0), sdzp, + sdl->dl_name, tdzp, tdl->dl_name, szp); + } else { + /* + * At this point, we have successfully created + * the target name, but have failed to remove + * the source name. Since the create was done + * with the ZRENAMING flag, there are + * complications; for one, the link count is + * wrong. The easiest way to deal with this + * is to remove the newly created target, and + * return the original error. This must + * succeed; fortunately, it is very unlikely to + * fail, since we just created it. + */ + VERIFY3U(zfs_link_destroy(tdl, szp, tx, + ZRENAMING, NULL), ==, 0); + } } } @@ -3151,327 +3443,349 @@ out: zfs_dirent_unlock(sdl); zfs_dirent_unlock(tdl); - VN_RELE(ZTOV(szp)); - if (tzp) - VN_RELE(ZTOV(tzp)); + zfs_inode_update(sdzp); + if (sdzp == tdzp) + rw_exit(&sdzp->z_name_lock); - ZFS_EXIT(zfsvfs); + if (sdzp != tdzp) + zfs_inode_update(tdzp); + + zfs_inode_update(szp); + iput(ZTOI(szp)); + if (tzp) { + zfs_inode_update(tzp); + iput(ZTOI(tzp)); + } + + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); + + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_rename); /* * Insert the indicated symbolic reference entry into the directory. * - * IN: dvp - Directory to contain new symbolic link. 
+ * IN: dip - Directory to contain new symbolic link. * link - Name for new symlink entry. * vap - Attributes of new entry. * target - Target path of new symlink. + * * cr - credentials of caller. - * ct - caller context * flags - case flags * * RETURN: 0 if success * error code if failure * * Timestamps: - * dvp - ctime|mtime updated + * dip - ctime|mtime updated */ /*ARGSUSED*/ -static int -zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr, - caller_context_t *ct, int flags) +int +zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link, + struct inode **ipp, cred_t *cr, int flags) { - znode_t *zp, *dzp = VTOZ(dvp); + znode_t *zp, *dzp = ITOZ(dip); zfs_dirlock_t *dl; dmu_tx_t *tx; - zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zfs_sb_t *zsb = ITOZSB(dip); zilog_t *zilog; - int len = strlen(link); + uint64_t len = strlen(link); int error; int zflg = ZNEW; - zfs_fuid_info_t *fuidp = NULL; + zfs_acl_ids_t acl_ids; + boolean_t fuid_dirtied; + uint64_t txtype = TX_SYMLINK; - ASSERT(vap->va_type == VLNK); + ASSERT(S_ISLNK(vap->va_mode)); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(dzp); - zilog = zfsvfs->z_log; + zilog = zsb->z_log; - if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), + if (zsb->z_utf8 && u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EILSEQ); } if (flags & FIGNORECASE) zflg |= ZCILOOK; -top: - if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { - ZFS_EXIT(zfsvfs); - return (error); - } if (len > MAXPATHLEN) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (ENAMETOOLONG); } + if ((error = zfs_acl_ids_create(dzp, 0, + vap, cr, NULL, &acl_ids)) != 0) { + ZFS_EXIT(zsb); + return (error); + } +top: + *ipp = NULL; + /* * Attempt to lock directory; fail if entry already exists. 
*/ error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); if (error) { - ZFS_EXIT(zfsvfs); + zfs_acl_ids_free(&acl_ids); + ZFS_EXIT(zsb); + return (error); + } + + if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) { + zfs_acl_ids_free(&acl_ids); + zfs_dirent_unlock(dl); + ZFS_EXIT(zsb); return (error); } - tx = dmu_tx_create(zfsvfs->z_os); + if (zfs_acl_ids_overquota(zsb, &acl_ids)) { + zfs_acl_ids_free(&acl_ids); + zfs_dirent_unlock(dl); + ZFS_EXIT(zsb); + return (EDQUOT); + } + tx = dmu_tx_create(zsb->z_os); + fuid_dirtied = zsb->z_fuid_dirty; dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); - dmu_tx_hold_bonus(tx, dzp->z_id); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); - if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) - dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE); - if (IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))) { - if (zfsvfs->z_fuid_obj == 0) { - dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); - dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); - } else { - dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); - dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, - FUID_SIZE_ESTIMATE(zfsvfs)); - } - } - error = dmu_tx_assign(tx, zfsvfs->z_assign); + dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + + ZFS_SA_BASE_ATTR_SIZE + len); + dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); + if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { + dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, + acl_ids.z_aclp->z_acl_bytes); + } + if (fuid_dirtied) + zfs_fuid_txhold(zsb, tx); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { zfs_dirent_unlock(dl); - if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } + zfs_acl_ids_free(&acl_ids); dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } - dmu_buf_will_dirty(dzp->z_dbuf, tx); - /* * Create a new object for the symlink. - * Put the link content into bonus buffer if it will fit; - * otherwise, store it just like any other file data. + * for version 4 ZPL datsets the symlink will be an SA attribute */ - if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) { - zfs_mknode(dzp, vap, tx, cr, 0, &zp, len, NULL, &fuidp); - if (len != 0) - bcopy(link, zp->z_phys + 1, len); - } else { - dmu_buf_t *dbp; - - zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, NULL, &fuidp); - /* - * Nothing can access the znode yet so no locking needed - * for growing the znode's blocksize. - */ - zfs_grow_blocksize(zp, len, tx); + zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); - VERIFY(0 == dmu_buf_hold(zfsvfs->z_os, - zp->z_id, 0, FTAG, &dbp)); - dmu_buf_will_dirty(dbp, tx); + if (fuid_dirtied) + zfs_fuid_sync(zsb, tx); - ASSERT3U(len, <=, dbp->db_size); - bcopy(link, dbp->db_data, len); - dmu_buf_rele(dbp, FTAG); - } - zp->z_phys->zp_size = len; + mutex_enter(&zp->z_lock); + if (zp->z_is_sa) + error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb), + link, len, tx); + else + zfs_sa_symlink(zp, link, len, tx); + mutex_exit(&zp->z_lock); + zp->z_size = len; + (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb), + &zp->z_size, sizeof (zp->z_size), tx); /* * Insert the new object into the directory. 
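 *
 * The link target itself was stored just above: SA-based znodes
 * (zp->z_is_sa) keep it in the SA_ZPL_SYMLINK attribute, non-SA
 * znodes fall back to zfs_sa_symlink(), and the length is recorded
 * through SA_ZPL_SIZE either way.  zfs_readlink() below uses the
 * matching pair of calls to read it back:
 *
 *	write:	sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb), link, len, tx)
 *		or zfs_sa_symlink(zp, link, len, tx)
 *	read:	sa_lookup_uio(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb), uio)
 *		or zfs_sa_readlink(zp, uio)
 *
 * All that remains here is to enter the name in the directory, log
 * the TX_SYMLINK record, and return the new inode through *ipp.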
*/ (void) zfs_link_create(dl, zp, tx, ZNEW); -out: - if (error == 0) { - uint64_t txtype = TX_SYMLINK; - if (flags & FIGNORECASE) - txtype |= TX_CI; - zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); - } - if (fuidp) - zfs_fuid_info_free(fuidp); + + if (flags & FIGNORECASE) + txtype |= TX_CI; + zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); + + zfs_inode_update(dzp); + zfs_inode_update(zp); + + zfs_acl_ids_free(&acl_ids); dmu_tx_commit(tx); zfs_dirent_unlock(dl); - VN_RELE(ZTOV(zp)); + *ipp = ZTOI(zp); - ZFS_EXIT(zfsvfs); + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); + + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_symlink); /* * Return, in the buffer contained in the provided uio structure, - * the symbolic path referred to by vp. + * the symbolic path referred to by ip. * - * IN: vp - vnode of symbolic link. - * uoip - structure to contain the link path. + * IN: ip - inode of symbolic link + * uio - structure to contain the link path. * cr - credentials of caller. - * ct - caller context - * - * OUT: uio - structure to contain the link path. * * RETURN: 0 if success * error code if failure * * Timestamps: - * vp - atime updated + * ip - atime updated */ /* ARGSUSED */ -static int -zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) +int +zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - size_t bufsz; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); int error; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - bufsz = (size_t)zp->z_phys->zp_size; - if (bufsz + sizeof (znode_phys_t) <= zp->z_dbuf->db_size) { - error = uiomove(zp->z_phys + 1, - MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); - } else { - dmu_buf_t *dbp; - error = dmu_buf_hold(zfsvfs->z_os, zp->z_id, 0, FTAG, &dbp); - if (error) { - ZFS_EXIT(zfsvfs); - return (error); - } - error = uiomove(dbp->db_data, - MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); - dmu_buf_rele(dbp, FTAG); - } + mutex_enter(&zp->z_lock); + if (zp->z_is_sa) + error = sa_lookup_uio(zp->z_sa_hdl, + SA_ZPL_SYMLINK(zsb), uio); + else + error = zfs_sa_readlink(zp, uio); + mutex_exit(&zp->z_lock); - ZFS_ACCESSTIME_STAMP(zfsvfs, zp); - ZFS_EXIT(zfsvfs); + ZFS_ACCESSTIME_STAMP(zsb, zp); + zfs_inode_update(zp); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_readlink); /* - * Insert a new entry into directory tdvp referencing svp. + * Insert a new entry into directory tdip referencing sip. * - * IN: tdvp - Directory to contain new entry. - * svp - vnode of new entry. + * IN: tdip - Directory to contain new entry. + * sip - inode of new entry. * name - name of new entry. * cr - credentials of caller. 
- * ct - caller context * * RETURN: 0 if success * error code if failure * * Timestamps: - * tdvp - ctime|mtime updated - * svp - ctime updated + * tdip - ctime|mtime updated + * sip - ctime updated */ /* ARGSUSED */ -static int -zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr, - caller_context_t *ct, int flags) +int +zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr) { - znode_t *dzp = VTOZ(tdvp); + znode_t *dzp = ITOZ(tdip); znode_t *tzp, *szp; - zfsvfs_t *zfsvfs = dzp->z_zfsvfs; + zfs_sb_t *zsb = ITOZSB(tdip); zilog_t *zilog; zfs_dirlock_t *dl; dmu_tx_t *tx; - vnode_t *realvp; int error; int zf = ZNEW; + uint64_t parent; uid_t owner; - ASSERT(tdvp->v_type == VDIR); + ASSERT(S_ISDIR(tdip->i_mode)); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(dzp); - zilog = zfsvfs->z_log; + zilog = zsb->z_log; - if (VOP_REALVP(svp, &realvp, ct) == 0) - svp = realvp; + /* + * POSIX dictates that we return EPERM here. + * Better choices include ENOTSUP or EISDIR. + */ + if (S_ISDIR(sip->i_mode)) { + ZFS_EXIT(zsb); + return (EPERM); + } - if (svp->v_vfsp != tdvp->v_vfsp) { - ZFS_EXIT(zfsvfs); + if (sip->i_sb != tdip->i_sb) { + ZFS_EXIT(zsb); return (EXDEV); } - szp = VTOZ(svp); + + szp = ITOZ(sip); ZFS_VERIFY_ZP(szp); - if (zfsvfs->z_utf8 && u8_validate(name, + /* Prevent links to .zfs/shares files */ + + if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb), + &parent, sizeof (uint64_t))) != 0) { + ZFS_EXIT(zsb); + return (error); + } + if (parent == zsb->z_shares_dir) { + ZFS_EXIT(zsb); + return (EPERM); + } + + if (zsb->z_utf8 && u8_validate(name, strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (EILSEQ); } +#ifdef HAVE_PN_UTILS if (flags & FIGNORECASE) zf |= ZCILOOK; +#endif /* HAVE_PN_UTILS */ -top: /* * We do not support links between attributes and non-attributes * because of the potential security risk of creating links * into "normal" file space in order to circumvent restrictions * imposed in attribute space. */ - if ((szp->z_phys->zp_flags & ZFS_XATTR) != - (dzp->z_phys->zp_flags & ZFS_XATTR)) { - ZFS_EXIT(zfsvfs); + if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) { + ZFS_EXIT(zsb); return (EINVAL); } - /* - * POSIX dictates that we return EPERM here. - * Better choices include ENOTSUP or EISDIR. - */ - if (svp->v_type == VDIR) { - ZFS_EXIT(zfsvfs); - return (EPERM); - } - - owner = zfs_fuid_map_id(zfsvfs, szp->z_phys->zp_uid, cr, ZFS_OWNER); - if (owner != crgetuid(cr) && - secpolicy_basic_link(cr) != 0) { - ZFS_EXIT(zfsvfs); + owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER); + if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) { + ZFS_EXIT(zsb); return (EPERM); } - if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { - ZFS_EXIT(zfsvfs); + if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) { + ZFS_EXIT(zsb); return (error); } +top: /* * Attempt to lock directory; fail if entry already exists. 
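 *
 * Recapping the ladder of checks made above, in sketch:
 *
 *	S_ISDIR(sip->i_mode)				-> EPERM
 *	sip->i_sb != tdip->i_sb				-> EXDEV
 *	parent of sip is zsb->z_shares_dir		-> EPERM
 *	(szp->z_pflags & ZFS_XATTR) !=
 *	    (dzp->z_pflags & ZFS_XATTR)			-> EINVAL
 *	not owner and no basic link privilege		-> EPERM
 *	zfs_zaccess(dzp, ACE_ADD_FILE, ...) fails	-> error from zfs_zaccess()
 *
 * Only the name lookup and the transaction work remain below.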
*/ error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); if (error) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } - tx = dmu_tx_create(zfsvfs->z_os); - dmu_tx_hold_bonus(tx, szp->z_id); + tx = dmu_tx_create(zsb->z_os); + dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); - error = dmu_tx_assign(tx, zfsvfs->z_assign); + zfs_sa_upgrade_txholds(tx, szp); + zfs_sa_upgrade_txholds(tx, dzp); + error = dmu_tx_assign(tx, TXG_NOWAIT); if (error) { zfs_dirent_unlock(dl); - if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { + if (error == ERESTART) { dmu_tx_wait(tx); dmu_tx_abort(tx); goto top; } dmu_tx_abort(tx); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } @@ -3479,8 +3793,10 @@ top: if (error == 0) { uint64_t txtype = TX_LINK; +#ifdef HAVE_PN_UTILS if (flags & FIGNORECASE) txtype |= TX_CI; +#endif /* HAVE_PN_UTILS */ zfs_log_link(zilog, tx, txtype, dzp, szp, name); } @@ -3488,272 +3804,245 @@ top: zfs_dirent_unlock(dl); - if (error == 0) { - vnevent_link(svp, ct); - } + if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) + zil_commit(zilog, 0); - ZFS_EXIT(zfsvfs); + zfs_inode_update(dzp); + zfs_inode_update(szp); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_link); -/* - * zfs_null_putapage() is used when the file system has been force - * unmounted. It just drops the pages. - */ -/* ARGSUSED */ -static int -zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, - size_t *lenp, int flags, cred_t *cr) +static void +zfs_putpage_commit_cb(void *arg, int error) { - pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR); - return (0); + struct page *pp = arg; + + if (error) { + __set_page_dirty_nobuffers(pp); + + if (error != ECANCELED) + SetPageError(pp); + } else { + ClearPageError(pp); + } + + end_page_writeback(pp); } /* - * Push a page out to disk, klustering if possible. - * - * IN: vp - file to push page to. - * pp - page to push. - * flags - additional flags. - * cr - credentials of caller. + * Push a page out to disk, once the page is on stable storage the + * registered commit callback will be run as notification of completion. * - * OUT: offp - start of range pushed. - * lenp - len of range pushed. + * IN: ip - page mapped for inode. + * pp - page to push (page is locked) + * wbc - writeback control data * * RETURN: 0 if success * error code if failure * - * NOTE: callers must have locked the page to be pushed. On - * exit, the page (and all other pages in the kluster) must be - * unlocked. + * Timestamps: + * ip - ctime|mtime updated */ /* ARGSUSED */ -static int -zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, - size_t *lenp, int flags, cred_t *cr) +int +zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - zilog_t *zilog = zfsvfs->z_log; - dmu_tx_t *tx; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + loff_t offset; + loff_t pgoff; + unsigned int pglen; rl_t *rl; - u_offset_t off, koff; - size_t len, klen; - uint64_t filesz; - int err; - - filesz = zp->z_phys->zp_size; - off = pp->p_offset; - len = PAGESIZE; - /* - * If our blocksize is bigger than the page size, try to kluster - * muiltiple pages so that we write a full block (thus avoiding - * a read-modify-write). - */ - if (off < filesz && zp->z_blksz > PAGESIZE) { - if (!ISP2(zp->z_blksz)) { - /* Only one block in the file. 
*/ - klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); - koff = 0; - } else { - klen = zp->z_blksz; - koff = P2ALIGN(off, (u_offset_t)klen); - } - ASSERT(koff <= filesz); - if (koff + klen > filesz) - klen = P2ROUNDUP(filesz - koff, (uint64_t)PAGESIZE); - pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags); + dmu_tx_t *tx; + caddr_t va; + int err = 0; + uint64_t mtime[2], ctime[2]; + sa_bulk_attr_t bulk[3]; + int cnt = 0; + int sync; + + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); + + ASSERT(PageLocked(pp)); + + pgoff = page_offset(pp); /* Page byte-offset in file */ + offset = i_size_read(ip); /* File length in bytes */ + pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */ + P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff); + + /* Page is beyond end of file */ + if (pgoff >= offset) { + unlock_page(pp); + ZFS_EXIT(zsb); + return (0); } - ASSERT3U(btop(len), ==, btopr(len)); -top: - rl = zfs_range_lock(zp, off, len, RL_WRITER); + + /* Truncate page length to end of file */ + if (pgoff + pglen > offset) + pglen = offset - pgoff; + +#if 0 /* - * Can't push pages past end-of-file. + * FIXME: Allow mmap writes past its quota. The correct fix + * is to register a page_mkwrite() handler to count the page + * against its quota when it is about to be dirtied. */ - filesz = zp->z_phys->zp_size; - if (off >= filesz) { - /* ignore all pages */ - err = 0; - goto out; - } else if (off + len > filesz) { - int npages = btopr(filesz - off); - page_t *trunc; - - page_list_break(&pp, &trunc, npages); - /* ignore pages past end of file */ - if (trunc) - pvn_write_done(trunc, flags); - len = filesz - off; + if (zfs_owner_overquota(zsb, zp, B_FALSE) || + zfs_owner_overquota(zsb, zp, B_TRUE)) { + err = EDQUOT; } +#endif + + set_page_writeback(pp); + unlock_page(pp); - tx = dmu_tx_create(zfsvfs->z_os); - dmu_tx_hold_write(tx, zp->z_id, off, len); - dmu_tx_hold_bonus(tx, zp->z_id); - err = dmu_tx_assign(tx, zfsvfs->z_assign); + rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER); + tx = dmu_tx_create(zsb->z_os); + + sync = ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) || + (wbc->sync_mode == WB_SYNC_ALL)); + if (!sync) + dmu_tx_callback_register(tx, zfs_putpage_commit_cb, pp); + + dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen); + + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, zp); + err = dmu_tx_assign(tx, TXG_NOWAIT); if (err != 0) { - if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { - zfs_range_unlock(rl); + if (err == ERESTART) dmu_tx_wait(tx); - dmu_tx_abort(tx); - err = 0; - goto top; - } + + /* Will call all registered commit callbacks */ dmu_tx_abort(tx); - goto out; - } - if (zp->z_blksz <= PAGESIZE) { - caddr_t va = zfs_map_page(pp, S_READ); - ASSERT3U(len, <=, PAGESIZE); - dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx); - zfs_unmap_page(pp, va); - } else { - err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx); - } + /* + * For the synchronous case the commit callback must be + * explicitly called because there is no registered callback. 
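 *
 * In sketch, the two completion paths for this page are:
 *
 *	async:	dmu_tx_callback_register(tx, zfs_putpage_commit_cb, pp);
 *		// runs once the txg commits; on error the page is
 *		// redirtied (and flagged unless ECANCELED), and
 *		// writeback is ended either way
 *	sync:	zil_commit(zsb->z_log, zp->z_id);
 *		zfs_putpage_commit_cb(pp, err);		// invoked by hand
 *
 * which is why the failed-assign path here passes ECANCELED: the page
 * goes back to dirty without being marked as an I/O error.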
+ */ + if (sync) + zfs_putpage_commit_cb(pp, ECANCELED); - if (err == 0) { - zfs_time_stamper(zp, CONTENT_MODIFIED, tx); - zfs_log_write(zilog, tx, TX_WRITE, zp, off, len, 0); - dmu_tx_commit(tx); + zfs_range_unlock(rl); + ZFS_EXIT(zsb); + return (err); } -out: + va = kmap(pp); + ASSERT3U(pglen, <=, PAGE_CACHE_SIZE); + dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx); + kunmap(pp); + + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL, &zp->z_pflags, 8); + + /* Preserve the mtime and ctime provided by the inode */ + ZFS_TIME_ENCODE(&ip->i_mtime, mtime); + ZFS_TIME_ENCODE(&ip->i_ctime, ctime); + zp->z_atime_dirty = 0; + zp->z_seq++; + + err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx); + + zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0); + dmu_tx_commit(tx); + zfs_range_unlock(rl); - pvn_write_done(pp, (err ? B_ERROR : 0) | flags); - if (offp) - *offp = off; - if (lenp) - *lenp = len; + if (sync) { + zil_commit(zsb->z_log, zp->z_id); + zfs_putpage_commit_cb(pp, err); + } + + ZFS_EXIT(zsb); return (err); } /* - * Copy the portion of the file indicated from pages into the file. - * The pages are stored in a page list attached to the files vnode. - * - * IN: vp - vnode of file to push page data to. - * off - position in file to put data. - * len - amount of data to write. - * flags - flags to control the operation. - * cr - credentials of caller. - * ct - caller context. - * - * RETURN: 0 if success - * error code if failure - * - * Timestamps: - * vp - ctime|mtime updated + * Update the system attributes when the inode has been dirtied. For the + * moment we're conservative and only update the atime, mtime, and ctime. */ -/*ARGSUSED*/ -static int -zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, - caller_context_t *ct) +int +zfs_dirty_inode(struct inode *ip, int flags) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - page_t *pp; - size_t io_len; - u_offset_t io_off; - uint64_t filesz; - int error = 0; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + dmu_tx_t *tx; + uint64_t atime[2], mtime[2], ctime[2]; + sa_bulk_attr_t bulk[3]; + int error; + int cnt = 0; - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - if (len == 0) { - /* - * Search the entire vp list for pages >= off. - */ - error = pvn_vplist_dirty(vp, (u_offset_t)off, zfs_putapage, - flags, cr); - goto out; - } + tx = dmu_tx_create(zsb->z_os); - filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */ - if (off > filesz) { - /* past end of file */ - ZFS_EXIT(zfsvfs); - return (0); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, zp); + + error = dmu_tx_assign(tx, TXG_WAIT); + if (error) { + dmu_tx_abort(tx); + goto out; } - len = MIN(len, filesz - off); + mutex_enter(&zp->z_lock); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zsb), NULL, &atime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); + SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); - for (io_off = off; io_off < off + len; io_off += io_len) { - if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) { - pp = page_lookup(vp, io_off, - (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED); - } else { - pp = page_lookup_nowait(vp, io_off, - (flags & B_FREE) ? 
SE_EXCL : SE_SHARED); - } + /* Preserve the mtime and ctime provided by the inode */ + ZFS_TIME_ENCODE(&ip->i_atime, atime); + ZFS_TIME_ENCODE(&ip->i_mtime, mtime); + ZFS_TIME_ENCODE(&ip->i_ctime, ctime); + zp->z_atime_dirty = 0; - if (pp != NULL && pvn_getdirty(pp, flags)) { - int err; + error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx); + mutex_exit(&zp->z_lock); - /* - * Found a dirty page to push - */ - err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr); - if (err) - error = err; - } else { - io_len = PAGESIZE; - } - } + dmu_tx_commit(tx); out: - if ((flags & B_ASYNC) == 0) - zil_commit(zfsvfs->z_log, UINT64_MAX, zp->z_id); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (error); } +EXPORT_SYMBOL(zfs_dirty_inode); /*ARGSUSED*/ void -zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) +zfs_inactive(struct inode *ip) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); int error; - rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); - if (zp->z_dbuf == NULL) { - /* - * The fs has been unmounted, or we did a - * suspend/resume and this file no longer exists. - */ - if (vn_has_cached_data(vp)) { - (void) pvn_vplist_dirty(vp, 0, zfs_null_putapage, - B_INVAL, cr); - } - - mutex_enter(&zp->z_lock); - vp->v_count = 0; /* count arrives as 1 */ - mutex_exit(&zp->z_lock); - rw_exit(&zfsvfs->z_teardown_inactive_lock); - zfs_znode_free(zp); + if (zfsctl_is_node(ip)) { + zfsctl_inode_inactive(ip); return; } - /* - * Attempt to push any data in the page cache. If this fails - * we will get kicked out later in zfs_zinactive(). - */ - if (vn_has_cached_data(vp)) { - (void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC, - cr); + rw_enter(&zsb->z_teardown_inactive_lock, RW_READER); + if (zp->z_sa_hdl == NULL) { + rw_exit(&zsb->z_teardown_inactive_lock); + return; } if (zp->z_atime_dirty && zp->z_unlinked == 0) { - dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); + dmu_tx_t *tx = dmu_tx_create(zsb->z_os); - dmu_tx_hold_bonus(tx, zp->z_id); + dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); + zfs_sa_upgrade_txholds(tx, zp); error = dmu_tx_assign(tx, TXG_WAIT); if (error) { dmu_tx_abort(tx); } else { - dmu_buf_will_dirty(zp->z_dbuf, tx); mutex_enter(&zp->z_lock); + (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), + (void *)&zp->z_atime, sizeof (zp->z_atime), tx); zp->z_atime_dirty = 0; mutex_exit(&zp->z_lock); dmu_tx_commit(tx); @@ -3761,13 +4050,14 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) } zfs_zinactive(zp); - rw_exit(&zfsvfs->z_teardown_inactive_lock); + rw_exit(&zsb->z_teardown_inactive_lock); } +EXPORT_SYMBOL(zfs_inactive); /* * Bounds-check the seek operation. * - * IN: vp - vnode seeking within + * IN: ip - inode seeking within * ooff - old file offset * noffp - pointer to new file offset * ct - caller context @@ -3776,165 +4066,69 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) * EINVAL if new offset invalid */ /* ARGSUSED */ -static int -zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, - caller_context_t *ct) +int +zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp) { - if (vp->v_type == VDIR) + if (S_ISDIR(ip->i_mode)) return (0); return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); } +EXPORT_SYMBOL(zfs_seek); /* - * Pre-filter the generic locking function to trap attempts to place - * a mandatory lock on a memory mapped file. + * Fill pages with data from the disk. 
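 *
 * In outline: the run of pages handed in through pl[] is clamped to
 * the current file size, then each page is mapped and filled with one
 * DMU read:
 *
 *	io_len = nr_pages << PAGE_CACHE_SHIFT;
 *	if (io_off + io_len > i_size)
 *		io_len = i_size - io_off;
 *	for each page pp in pl[]:
 *		va = kmap(pp);
 *		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
 *		    DMU_READ_PREFETCH);
 *		kunmap(pp);
 *
 * Checksum failures (ECKSUM) are reported to the caller as EIO.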
*/ static int -zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset, - flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct) +zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - int error; - - ZFS_ENTER(zfsvfs); - ZFS_VERIFY_ZP(zp); + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + objset_t *os; + struct page *cur_pp; + u_offset_t io_off, total; + size_t io_len; + loff_t i_size; + unsigned page_idx; + int err; + + os = zsb->z_os; + io_len = nr_pages << PAGE_CACHE_SHIFT; + i_size = i_size_read(ip); + io_off = page_offset(pl[0]); + + if (io_off + io_len > i_size) + io_len = i_size - io_off; /* - * We are following the UFS semantics with respect to mapcnt - * here: If we see that the file is mapped already, then we will - * return an error, but we don't worry about races between this - * function and zfs_map(). + * Iterate over list of pages and read each page individually. */ - if (zp->z_mapcnt > 0 && MANDMODE((mode_t)zp->z_phys->zp_mode)) { - ZFS_EXIT(zfsvfs); - return (EAGAIN); - } - error = fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct); - ZFS_EXIT(zfsvfs); - return (error); -} - -/* - * If we can't find a page in the cache, we will create a new page - * and fill it with file data. For efficiency, we may try to fill - * multiple pages at once (klustering). - */ -static int -zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg, - caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw) -{ - znode_t *zp = VTOZ(vp); - page_t *pp, *cur_pp; - objset_t *os = zp->z_zfsvfs->z_os; - caddr_t va; - u_offset_t io_off, total; - uint64_t oid = zp->z_id; - size_t io_len; - uint64_t filesz; - int err; - - /* - * If we are only asking for a single page don't bother klustering. - */ - filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */ - if (off >= filesz) - return (EFAULT); - if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) { - io_off = off; - io_len = PAGESIZE; - pp = page_create_va(vp, io_off, io_len, PG_WAIT, seg, addr); - } else { - /* - * Try to fill a kluster of pages (a blocks worth). - */ - size_t klen; - u_offset_t koff; - - if (!ISP2(zp->z_blksz)) { - /* Only one block in the file. */ - klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); - koff = 0; - } else { - /* - * It would be ideal to align our offset to the - * blocksize but doing so has resulted in some - * strange application crashes. For now, we - * leave the offset as is and only adjust the - * length if we are off the end of the file. - */ - koff = off; - klen = plsz; - } - ASSERT(koff <= filesz); - if (koff + klen > filesz) - klen = P2ROUNDUP(filesz, (uint64_t)PAGESIZE) - koff; - ASSERT3U(off, >=, koff); - ASSERT3U(off, <, koff + klen); - pp = pvn_read_kluster(vp, off, seg, addr, &io_off, - &io_len, koff, klen, 0); - } - if (pp == NULL) { - /* - * Some other thread entered the page before us. - * Return to zfs_getpage to retry the lookup. - */ - *pl = NULL; - return (0); - } - - /* - * Fill the pages in the kluster. 
- */ - cur_pp = pp; + page_idx = 0; + cur_pp = pl[0]; for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { - ASSERT3U(io_off, ==, cur_pp->p_offset); - va = zfs_map_page(cur_pp, S_WRITE); - err = dmu_read(os, oid, io_off, PAGESIZE, va); - zfs_unmap_page(cur_pp, va); + caddr_t va; + + va = kmap(cur_pp); + err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va, + DMU_READ_PREFETCH); + kunmap(cur_pp); if (err) { - /* On error, toss the entire kluster */ - pvn_read_done(pp, B_ERROR); /* convert checksum errors into IO errors */ if (err == ECKSUM) err = EIO; return (err); } - cur_pp = cur_pp->p_next; + cur_pp = pl[++page_idx]; } -out: - /* - * Fill in the page list array from the kluster. If - * there are too many pages in the kluster, return - * as many pages as possible starting from the desired - * offset `off'. - * NOTE: the page list will always be null terminated. - */ - pvn_plist_init(pp, pl, plsz, off, io_len, rw); return (0); } /* - * Return pointers to the pages for the file region [off, off + len] - * in the pl array. If plsz is greater than len, this function may - * also return page pointers from before or after the specified - * region (i.e. some region [off', off' + plsz]). These additional - * pages are only returned if they are already in the cache, or were - * created as part of a klustered read. - * - * IN: vp - vnode of file to get data from. - * off - position in file to get data from. - * len - amount of data to retrieve. - * plsz - length of provided page list. - * seg - segment to obtain pages for. - * addr - virtual address of fault. - * rw - mode of created pages. - * cr - credentials of caller. - * ct - caller context. + * Uses zfs_fillpage to read data from the file and fill the pages. * - * OUT: protp - protection mode of created pages. - * pl - list of pages created. + * IN: ip - inode of file to get data from. + * pl - list of pages to read + * nr_pages - number of pages to read * * RETURN: 0 if success * error code if failure @@ -3943,263 +4137,119 @@ out: * vp - atime updated */ /* ARGSUSED */ -static int -zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, - page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, - enum seg_rw rw, cred_t *cr, caller_context_t *ct) +int +zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - page_t *pp, **pl0 = pl; - int need_unlock = 0, err = 0; - offset_t orig_off; - - ZFS_ENTER(zfsvfs); - ZFS_VERIFY_ZP(zp); + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + int err; - if (protp) - *protp = PROT_ALL; - - /* no faultahead (for now) */ - if (pl == NULL) { - ZFS_EXIT(zfsvfs); + if (pl == NULL) return (0); - } - - /* can't fault past EOF */ - if (off >= zp->z_phys->zp_size) { - ZFS_EXIT(zfsvfs); - return (EFAULT); - } - orig_off = off; - - /* - * If we already own the lock, then we must be page faulting - * in the middle of a write to this file (i.e., we are writing - * to this file using data from a mapped region of the file). - */ - if (rw_owner(&zp->z_map_lock) != curthread) { - rw_enter(&zp->z_map_lock, RW_WRITER); - need_unlock = TRUE; - } - - /* - * Loop through the requested range [off, off + len] looking - * for pages. If we don't find a page, we will need to create - * a new page and fill it with data from the file. 
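 *
 * (In the Linux implementation below this lookup/kluster loop is
 * gone: zfs_getpage() simply calls zfs_fillpage(ip, pl, nr_pages)
 * for the whole array and, on success, stamps the access time with
 * ZFS_ACCESSTIME_STAMP().)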
- */ - while (len > 0) { - if (plsz < PAGESIZE) - break; - if (pp = page_lookup(vp, off, SE_SHARED)) { - *pl++ = pp; - off += PAGESIZE; - addr += PAGESIZE; - len -= PAGESIZE; - plsz -= PAGESIZE; - } else { - err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw); - if (err) - goto out; - /* - * klustering may have changed our region - * to be block aligned. - */ - if (((pp = *pl) != 0) && (off != pp->p_offset)) { - int delta = off - pp->p_offset; - len += delta; - off -= delta; - addr -= delta; - } - while (*pl) { - pl++; - off += PAGESIZE; - addr += PAGESIZE; - plsz -= PAGESIZE; - if (len > PAGESIZE) - len -= PAGESIZE; - else - len = 0; - } - } - } - - /* - * Fill out the page array with any pages already in the cache. - */ - while (plsz > 0) { - pp = page_lookup_nowait(vp, off, SE_SHARED); - if (pp == NULL) - break; - *pl++ = pp; - off += PAGESIZE; - plsz -= PAGESIZE; - } - ZFS_ACCESSTIME_STAMP(zfsvfs, zp); -out: - /* - * We can't grab the range lock for the page as reader which would - * stop truncation as this leads to deadlock. So we need to recheck - * the file size. - */ - if (orig_off >= zp->z_phys->zp_size) - err = EFAULT; - if (err) { - /* - * Release any pages we have previously locked. - */ - while (pl > pl0) - page_unlock(*--pl); - } + ZFS_ENTER(zsb); + ZFS_VERIFY_ZP(zp); - *pl = NULL; + err = zfs_fillpage(ip, pl, nr_pages); - if (need_unlock) - rw_exit(&zp->z_map_lock); + if (!err) + ZFS_ACCESSTIME_STAMP(zsb, zp); - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (err); } +EXPORT_SYMBOL(zfs_getpage); /* - * Request a memory map for a section of a file. This code interacts - * with common code and the VM system as follows: + * Check ZFS specific permissions to memory map a section of a file. * - * common code calls mmap(), which ends up in smmap_common() + * IN: ip - inode of the file to mmap + * off - file offset + * addrp - start address in memory region + * len - length of memory region + * vm_flags- address flags * - * this calls VOP_MAP(), which takes you into (say) zfs - * - * zfs_map() calls as_map(), passing segvn_create() as the callback - * - * segvn_create() creates the new segment and calls VOP_ADDMAP() - * - * zfs_addmap() updates z_mapcnt + * RETURN: 0 if success + * error code if failure */ /*ARGSUSED*/ -static int -zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, - size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, - caller_context_t *ct) +int +zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len, + unsigned long vm_flags) { - znode_t *zp = VTOZ(vp); - zfsvfs_t *zfsvfs = zp->z_zfsvfs; - segvn_crargs_t vn_a; - int error; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); - ZFS_ENTER(zfsvfs); + ZFS_ENTER(zsb); ZFS_VERIFY_ZP(zp); - if ((prot & PROT_WRITE) && - (zp->z_phys->zp_flags & (ZFS_IMMUTABLE | ZFS_READONLY | - ZFS_APPENDONLY))) { - ZFS_EXIT(zfsvfs); + if ((vm_flags & VM_WRITE) && (zp->z_pflags & + (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) { + ZFS_EXIT(zsb); return (EPERM); } - if ((prot & (PROT_READ | PROT_EXEC)) && - (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED)) { - ZFS_EXIT(zfsvfs); + if ((vm_flags & (VM_READ | VM_EXEC)) && + (zp->z_pflags & ZFS_AV_QUARANTINED)) { + ZFS_EXIT(zsb); return (EACCES); } - if (vp->v_flag & VNOMAP) { - ZFS_EXIT(zfsvfs); - return (ENOSYS); - } - if (off < 0 || len > MAXOFFSET_T - off) { - ZFS_EXIT(zfsvfs); + ZFS_EXIT(zsb); return (ENXIO); } - if (vp->v_type != VREG) { - ZFS_EXIT(zfsvfs); - return (ENODEV); - } - - /* - * If file is locked, disallow mapping. 
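 *
 * For reference, the Linux zfs_map() above reduces to three checks
 * before the generic mmap path takes over:
 *
 *	VM_WRITE on an immutable/read-only/append-only file	-> EPERM
 *	VM_READ or VM_EXEC on an AV-quarantined file		-> EACCES
 *	off < 0, or len > MAXOFFSET_T - off			-> ENXIO
 *
 * Address selection and segment setup are left to the kernel rather
 * than the Solaris as_map()/segvn code removed here.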
- */ - if (MANDMODE((mode_t)zp->z_phys->zp_mode) && vn_has_flocks(vp)) { - ZFS_EXIT(zfsvfs); - return (EAGAIN); - } - - as_rangelock(as); - error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags); - if (error != 0) { - as_rangeunlock(as); - ZFS_EXIT(zfsvfs); - return (error); - } - - vn_a.vp = vp; - vn_a.offset = (u_offset_t)off; - vn_a.type = flags & MAP_TYPE; - vn_a.prot = prot; - vn_a.maxprot = maxprot; - vn_a.cred = cr; - vn_a.amp = NULL; - vn_a.flags = flags & ~MAP_TYPE; - vn_a.szc = 0; - vn_a.lgrp_mem_policy_flags = 0; - - error = as_map(as, *addrp, len, segvn_create, &vn_a); - - as_rangeunlock(as); - ZFS_EXIT(zfsvfs); - return (error); -} - -/* ARGSUSED */ -static int -zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, - size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, - caller_context_t *ct) -{ - uint64_t pages = btopr(len); - - atomic_add_64(&VTOZ(vp)->z_mapcnt, pages); + ZFS_EXIT(zsb); return (0); } +EXPORT_SYMBOL(zfs_map); /* - * The reason we push dirty pages as part of zfs_delmap() is so that we get a - * more accurate mtime for the associated file. Since we don't have a way of - * detecting when the data was actually modified, we have to resort to - * heuristics. If an explicit msync() is done, then we mark the mtime when the - * last page is pushed. The problem occurs when the msync() call is omitted, - * which by far the most common case: - * - * open() - * mmap() - * - * munmap() - * close() - *