/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zpl.h>
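/*
 * The zfs_*() entry points used below return positive errno values in
 * the Solaris style, so every return value is negated before being
 * handed back to the Linux VFS; the ASSERT3S(error, <=, 0) checks
 * enforce this convention.  A credential reference is held across
 * each call.
 */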
static int
zpl_open(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        if (error)
                return (error);

        return generic_file_open(ip, filp);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_close(ip, filp->f_flags, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct dentry *dentry = filp->f_path.dentry;
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_readdir(dentry->d_inode, dirent, filldir,
            &filp->f_pos, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}
/*
 * 2.6.35 API change,
 * As of 2.6.35 the dentry argument to the .fsync() vfs hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 *
 * 2.6.34 API change,
 * Prior to 2.6.34 the nfsd kernel server would pass a NULL struct file *
 * to the .fsync() hook.  For this reason, we must be careful not to use
 * filp unconditionally in the 3 argument case.
 */
#ifdef HAVE_2ARGS_FSYNC
static int
zpl_fsync(struct file *filp, int datasync)
{
        struct dentry *dentry = filp->f_path.dentry;
#else
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
#endif /* HAVE_2ARGS_FSYNC */
        cred_t *cr = CRED();
        int error;

        crhold(cr);
        error = -zfs_fsync(dentry->d_inode, datasync, cr);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}
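/*
 * The common read/write helpers below wrap the caller's buffer in a
 * single-segment uio and hand it to zfs_read()/zfs_write().  The
 * uio_seg_t argument records whether the buffer lives in user or
 * kernel space; on success the number of bytes transferred (len minus
 * the residual count) is returned.
 */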
ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t pos,
    uio_seg_t segment, int flags, cred_t *cr)
{
        int error;
        struct iovec iov;
        uio_t uio;

        iov.iov_base = (void *)buf;
        iov.iov_len = len;

        uio.uio_iov = &iov;
        uio.uio_resid = len;
        uio.uio_iovcnt = 1;
        uio.uio_loffset = pos;
        uio.uio_limit = MAXOFFSET_T;
        uio.uio_segflg = segment;

        error = -zfs_read(ip, &uio, flags, cr);
        if (error < 0)
                return (error);

        return (len - uio.uio_resid);
}
static ssize_t
zpl_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        cred_t *cr = CRED();
        ssize_t read;

        crhold(cr);
        read = zpl_read_common(filp->f_mapping->host, buf, len, *ppos,
            UIO_USERSPACE, filp->f_flags, cr);
        crfree(cr);

        if (read < 0)
                return (read);

        *ppos += read;
        return (read);
}
ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t pos,
    uio_seg_t segment, int flags, cred_t *cr)
{
        int error;
        struct iovec iov;
        uio_t uio;

        iov.iov_base = (void *)buf;
        iov.iov_len = len;

        uio.uio_iov = &iov;
        uio.uio_resid = len;
        uio.uio_iovcnt = 1;
        uio.uio_loffset = pos;
        uio.uio_limit = MAXOFFSET_T;
        uio.uio_segflg = segment;

        error = -zfs_write(ip, &uio, flags, cr);
        if (error < 0)
                return (error);

        return (len - uio.uio_resid);
}
static ssize_t
zpl_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
        cred_t *cr = CRED();
        ssize_t wrote;

        crhold(cr);
        wrote = zpl_write_common(filp->f_mapping->host, buf, len, *ppos,
            UIO_USERSPACE, filp->f_flags, cr);
        crfree(cr);

        if (wrote < 0)
                return (wrote);

        *ppos += wrote;
        return (wrote);
}
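/*
 * Illustrative sketch only (compiled out): because the helpers above
 * take an explicit uio_seg_t, they can also service kernel buffers.
 * The function name below is hypothetical and not part of this file.
 */
#if 0
static ssize_t
zpl_read_kernel_example(struct inode *ip, void *kbuf, size_t len, loff_t pos)
{
        cred_t *cr = CRED();
        ssize_t read;

        crhold(cr);
        /* UIO_SYSSPACE marks kbuf as kernel memory, so no user copies occur. */
        read = zpl_read_common(ip, kbuf, len, pos, UIO_SYSSPACE, 0, cr);
        crfree(cr);

        return (read);
}
#endif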
/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is problematic because it relies on being tightly integrated with
 * the page cache.  To handle this we cache mmap'ed files twice, once
 * in the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2) data will be read first from the page
 * cache then the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of an mmap'ed
 * file will always check the page cache first, correct data will
 * still be returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks.  The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files.  It
 * also adds additional complexity to the code, keeping both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly on to the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct inode *ip = filp->f_mapping->host;
        znode_t *zp = ITOZ(ip);
        int error;

        error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
            (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
        if (error)
                return (error);

        error = generic_file_mmap(filp, vma);
        if (error)
                return (error);

        /*
         * Flag the znode as mapped so the read(2)/write(2) paths know
         * the page cache must be kept coherent with the ARC.
         */
        mutex_enter(&zp->z_lock);
        zp->z_is_mapped = 1;
        mutex_exit(&zp->z_lock);

        return (error);
}
/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zfs_getpage() to read in a single
 * page at a time.  This works, but the more correct approach would be
 * to update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
        struct inode *ip;
        struct page *pl[1];
        int error = 0;

        ASSERT(PageLocked(pp));
        ip = pp->mapping->host;
        pl[0] = pp;

        error = -zfs_getpage(ip, pl, 1);

        if (error) {
                SetPageError(pp);
                ClearPageUptodate(pp);
        } else {
                ClearPageError(pp);
                SetPageUptodate(pp);
                flush_dcache_page(pp);
        }

        unlock_page(pp);
        return (error);
}
/*
 * Populate a set of pages with data for the Linux page cache.  This
 * function will only be called for read ahead and never for demand
 * paging.  For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
        return (read_cache_pages(mapping, pages,
            (filler_t *)zpl_readpage, filp));
}
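/*
 * Common writeback helper, invoked both directly by .writepage() and
 * via write_cache_pages() for .writepages().  The opaque data argument
 * is the page's address_space, from which the backing inode is taken.
 */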
int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
        struct address_space *mapping = data;

        ASSERT(PageLocked(pp));
        ASSERT(!PageWriteback(pp));

        /*
         * Disable the normal reclaim path for zpl_putpage().  This
         * ensures that all memory allocations under this call path
         * will never enter direct reclaim.  If this were to happen
         * the VM might try to write out additional pages by calling
         * zpl_putpage() again, resulting in a deadlock.
         */
        current->flags |= PF_MEMALLOC;
        (void) zfs_putpage(mapping->host, pp, wbc);
        current->flags &= ~PF_MEMALLOC;

        return (0);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        return write_cache_pages(mapping, wbc, zpl_putpage, mapping);
}
/*
 * Write out dirty pages to the ARC.  This function is only required to
 * support mmap(2).  Mapped pages may be dirtied by memory operations
 * which never call .write().  These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
        return zpl_putpage(pp, wbc, pp->mapping);
}
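/*
 * Operation tables which register the ZPL callbacks above with the
 * Linux VFS for regular files, mapped regions, and directories.
 */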
const struct address_space_operations zpl_address_space_operations = {
        .readpages      = zpl_readpages,
        .readpage       = zpl_readpage,
        .writepage      = zpl_writepage,
        .writepages     = zpl_writepages,
};
const struct file_operations zpl_file_operations = {
        .open           = zpl_open,
        .release        = zpl_release,
        .llseek         = generic_file_llseek,
        .read           = zpl_read,
        .write          = zpl_write,
        .readdir        = zpl_readdir,
        .mmap           = zpl_mmap,
        .fsync          = zpl_fsync,
};
const struct file_operations zpl_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = zpl_readdir,
        .fsync          = zpl_fsync,
};