Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a8e7d49a authored by Linus Torvalds
Browse files

Fix race in create_empty_buffers() vs __set_page_dirty_buffers()



Nick Piggin noticed this (very unlikely) race between setting a page
dirty and creating the buffers for it - we need to hold the mapping
private_lock until we've set the page dirty bit in order to make sure
that create_empty_buffers() cannot build up a set of buffers without
the dirty bits set when the page is dirty.

I doubt anybody has ever hit this race (and it didn't solve the issue
Nick was looking at), but as Nick says: "Still, it does appear to solve
a real race, which we should close."

Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 68df3755
Loading
Loading
Loading
Loading
+11 −12
Original line number Diff line number Diff line
@@ -760,15 +760,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -785,8 +779,6 @@ static int __set_page_dirty(struct page *page,
	}
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
@@ -816,6 +808,7 @@ static int __set_page_dirty(struct page *page,
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
@@ -831,9 +824,12 @@ int __set_page_dirty_buffers(struct page *page)
			bh = bh->b_this_page;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);
	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

@@ -1262,8 +1258,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
			return;
	}

	if (!test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page))
			__set_page_dirty(page, page_mapping(page), 0);
	}
}

/*