iomap: move locking out of iomap_write_delalloc_release
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Tue, 8 Oct 2024 08:59:14 +0000 (10:59 +0200)
Commit:     Carlos Maiolino <cem@kernel.org>
CommitDate: Tue, 15 Oct 2024 09:37:42 +0000 (11:37 +0200)
XFS (currently the only user of iomap_write_delalloc_release) already
holds the invalidate_lock for most zeroing operations.  To be able to
avoid a deadlock, iomap_write_delalloc_release needs to stop taking the
lock itself, but handling that inside iomap would leak XFS locking
details into iomap.

To avoid this, require the caller to hold the invalidate_lock when
calling iomap_write_delalloc_release instead of taking it there.
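
For context, the deadlock being avoided looks roughly like the sketch
below.  This is illustrative only: the call chain is simplified, and the
zeroing entry point stands in for the XFS paths that, per the message
above, already hold the invalidate_lock.

	/* Sketch of the problematic pattern before this change. */
	filemap_invalidate_lock(inode->i_mapping);	/* zeroing path */
	error = iomap_zero_range(inode, pos, len, NULL, ops);
	/*
	 * A short write here makes ->iomap_end punch the unused delalloc
	 * reservation via iomap_write_delalloc_release(), which used to
	 * take filemap_invalidate_lock() again.  invalidate_lock is a
	 * plain (non-recursive) rw_semaphore, so the nested down_write()
	 * self-deadlocks.
	 */
	filemap_invalidate_unlock(inode->i_mapping);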

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
fs/iomap/buffered-io.c
fs/xfs/xfs_iomap.c

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index b4f742f..aa587b2 100644
@@ -1211,12 +1211,13 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
        loff_t scan_end_byte = min(i_size_read(inode), end_byte);
 
        /*
-        * Lock the mapping to avoid races with page faults re-instantiating
-        * folios and dirtying them via ->page_mkwrite whilst we walk the
-        * cache and perform delalloc extent removal. Failing to do this can
-        * leave dirty pages with no space reservation in the cache.
+        * The caller must hold invalidate_lock to avoid races with page faults
+        * re-instantiating folios and dirtying them via ->page_mkwrite whilst
+        * we walk the cache and perform delalloc extent removal.  Failing to do
+        * this can leave dirty pages with no space reservation in the cache.
         */
-       filemap_invalidate_lock(inode->i_mapping);
+       lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
+
        while (start_byte < scan_end_byte) {
                loff_t          data_end;
 
@@ -1233,7 +1234,7 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
                if (start_byte == -ENXIO || start_byte == scan_end_byte)
                        break;
                if (WARN_ON_ONCE(start_byte < 0))
-                       goto out_unlock;
+                       return;
                WARN_ON_ONCE(start_byte < punch_start_byte);
                WARN_ON_ONCE(start_byte > scan_end_byte);
 
@@ -1244,7 +1245,7 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
                data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
                                scan_end_byte, SEEK_HOLE);
                if (WARN_ON_ONCE(data_end < 0))
-                       goto out_unlock;
+                       return;
 
                /*
                 * If we race with post-direct I/O invalidation of the page cache,
@@ -1266,8 +1267,6 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
        if (punch_start_byte < end_byte)
                punch(inode, punch_start_byte, end_byte - punch_start_byte,
                                iomap);
-out_unlock:
-       filemap_invalidate_unlock(inode->i_mapping);
 }
 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
 
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 30f2530..01324da 100644
@@ -1239,8 +1239,10 @@ xfs_buffered_write_iomap_end(
        if (start_byte >= end_byte)
                return 0;
 
+       filemap_invalidate_lock(inode->i_mapping);
        iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
                        xfs_buffered_write_delalloc_punch);
+       filemap_invalidate_unlock(inode->i_mapping);
        return 0;
 }
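
The caller-side pattern is now visible in the hunk above; the
lockdep_assert_held_write() left behind in iomap turns the rule into a
runtime check on lockdep-enabled kernels.  Any other filesystem adopting
this helper would follow the same shape, sketched here with a
hypothetical punch callback:

	filemap_invalidate_lock(inode->i_mapping);
	iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
			iomap, myfs_delalloc_punch);	/* hypothetical */
	filemap_invalidate_unlock(inode->i_mapping);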