ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
+ ASSERT(!(current->flags & PF_NOFS));
/*
- * Disable the normal reclaim path for zpl_putpage(). This
- * ensures that all memory allocations under this call path
- * will never enter direct reclaim. If this were to happen
- * the VM might try to write out additional pages by calling
- * zpl_putpage() again resulting in a deadlock.
+ * Annotate this call path with a flag that indicates that it is
+ * unsafe to use KM_SLEEP during memory allocations due to the
+ * potential for a deadlock. KM_PUSHPAGE should be used instead.
*/
- if (current->flags & PF_MEMALLOC) {
- (void) zfs_putpage(mapping->host, pp, wbc);
- } else {
- current->flags |= PF_MEMALLOC;
- (void) zfs_putpage(mapping->host, pp, wbc);
- current->flags &= ~PF_MEMALLOC;
- }
+ current->flags |= PF_NOFS;
+ (void) zfs_putpage(mapping->host, pp, wbc);
+ current->flags &= ~PF_NOFS;
return (0);
}
}
#endif /* HAVE_FILE_FALLOCATE */
+static long
+zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case ZFS_IOC_GETFLAGS:
+ case ZFS_IOC_SETFLAGS:
+ return (-EOPNOTSUPP);
+ default:
+ return (-ENOTTY);
+ }
+}
+
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point.  The native handler takes no
 * pointer arguments that require translation, so delegate directly
 * to zpl_ioctl().
 *
 * Note: return value parenthesized for consistency with the
 * return (...) convention used throughout this file.
 */
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return (zpl_ioctl(filp, cmd, arg));
}
#endif /* CONFIG_COMPAT */
+
+
const struct address_space_operations zpl_address_space_operations = {
.readpages = zpl_readpages,
.readpage = zpl_readpage,
.llseek = generic_file_llseek,
.read = zpl_read,
.write = zpl_write,
- .readdir = zpl_readdir,
.mmap = zpl_mmap,
.fsync = zpl_fsync,
#ifdef HAVE_FILE_FALLOCATE
.fallocate = zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
+ .unlocked_ioctl = zpl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = zpl_compat_ioctl,
+#endif
};
const struct file_operations zpl_dir_file_operations = {
.read = generic_read_dir,
.readdir = zpl_readdir,
.fsync = zpl_fsync,
+ .unlocked_ioctl = zpl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = zpl_compat_ioctl,
+#endif
};