From 4c837f0d931546e656b832caf11b8d4c2063d905 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Wed, 31 Oct 2012 10:06:34 -0700
Subject: [PATCH] Fix "allocating allocated segment" panic

Gunnar Beutner did all the hard work on this one by correctly
identifying that this issue is a race between dmu_sync() and
dbuf_dirty().

Now in all cases the caller is responsible for preventing this race
by making sure the zfs_range_lock() is held when dirtying a buffer
which may be referenced in a log record.  The mmap case which relies
on zfs_putpage() was not taking the range lock.  This code was
accidentally dropped when the function was rewritten for the Linux
VFS.

This patch adds the required range locking to zfs_putpage().  It
also adds the missing ZFS_ENTER()/ZFS_EXIT() macros which aren't
strictly required due to the VFS holding a reference.  However, this
makes the code more consistent with the upstream code and there's
no harm in being extra careful here.

Original-patch-by: Gunnar Beutner
Signed-off-by: Brian Behlendorf
Closes #541
---
 module/zfs/zfs_vnops.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 89f0f60..5765c9a 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -3790,7 +3790,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	zfs_sb_t	*zsb = ITOZSB(ip);
 	loff_t		offset;
 	loff_t		pgoff;
-	unsigned int	pglen;
+	unsigned int	pglen;
+	rl_t		*rl;
 	dmu_tx_t	*tx;
 	caddr_t		va;
 	int		err = 0;
@@ -3799,6 +3800,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	int		cnt = 0;
 	int		sync;
 
+	ZFS_ENTER(zsb);
+
 	ZFS_VERIFY_ZP(zp);
 
 	ASSERT(PageLocked(pp));
@@ -3810,6 +3813,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	/* Page is beyond end of file */
 	if (pgoff >= offset) {
 		unlock_page(pp);
+		ZFS_EXIT(zsb);
 		return (0);
 	}
 
@@ -3832,6 +3836,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	set_page_writeback(pp);
 	unlock_page(pp);
 
+	rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
 	tx = dmu_tx_create(zsb->z_os);
 
 	sync = ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) ||
@@ -3858,6 +3863,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 		if (sync)
 			zfs_putpage_commit_cb(pp, ECANCELED);
 
+		zfs_range_unlock(rl);
+		ZFS_EXIT(zsb);
 		return (err);
 	}
 
@@ -3873,6 +3880,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0);
 	dmu_tx_commit(tx);
 
+	zfs_range_unlock(rl);
 	ASSERT3S(err, ==, 0);
 
 	if (sync) {
@@ -3880,6 +3888,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 		zfs_putpage_commit_cb(pp, err);
 	}
 
+	ZFS_EXIT(zsb);
 	return (err);
 }
 
--
1.8.3.1
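
As a recap of the locking order this patch enforces, below is a heavily
simplified sketch of the post-patch zfs_putpage() flow: take the range
lock before the transaction dirties any buffer, and release it only
after the log record has been written and the transaction committed.
It assumes the ZFS 0.6.x in-tree interfaces already used in the hunks
above (zfs_range_lock(), zfs_range_unlock(), dmu_tx_*(),
zfs_log_write()); the dmu_tx_hold_*() calls and the helper name are
illustrative, the page and SA bookkeeping is elided, and the fragment
only builds inside the ZFS source tree.

/*
 * Illustrative sketch only, not the actual zfs_putpage().  The range
 * lock is held across the whole dirty-and-log sequence so dmu_sync()
 * cannot race with dbuf_dirty() on a buffer a log record still
 * references.
 */
static int
putpage_range_locked_sketch(znode_t *zp, zfs_sb_t *zsb,
    uint64_t pgoff, uint64_t pglen)
{
	rl_t *rl;
	dmu_tx_t *tx;
	int err;

	/* 1. Take the range lock before the tx dirties anything. */
	rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		/* 2a. On failure, drop the lock before returning. */
		zfs_range_unlock(rl);
		return (err);
	}

	/* ... copy of the page contents into the DMU elided ... */

	zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0);
	dmu_tx_commit(tx);

	/* 2b. Only now is it safe to release the range lock. */
	zfs_range_unlock(rl);
	return (0);
}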