
Commit 791b7f08 authored by Aneesh Kumar K.V, committed by Theodore Ts'o

ext4: Fix the delalloc writepages to allocate blocks at the right offset.



When iterating through the pages which have mapped buffer_heads, we
failed to update the b_state value. This results in allocating blocks
at logical offset 0.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@kernel.org
parent 2a21e37e
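
The heart of the fix is the new else-if branch added to __mpage_da_writepage() in the second hunk below: when a page's dirty buffers are already mapped, the extent descriptor mpd->lbh still needs its b_state seeded from one of them, because mpage_da_map_blocks() consults that b_state to decide whether anything has to be allocated; with an untouched descriptor the allocation would start at logical block 0, as the commit message says. The following is a minimal userspace sketch of that situation, not kernel code: the structs, flag values, and helper names (struct extent, walk_page, map_blocks, and this BH_FLAGS value) are illustrative stand-ins for mpd->lbh, the buffer_head walk, and mpage_da_map_blocks().

/*
 * Simplified model of the case this patch fixes: a page whose dirty
 * buffers are all already mapped.  No new extent is collected, so the
 * later "allocate or not" decision depends entirely on the state that
 * was (or was not) recorded in the extent descriptor.
 */
#include <stdio.h>
#include <stdbool.h>

#define BH_DIRTY	(1u << 0)	/* illustrative flag values */
#define BH_MAPPED	(1u << 1)
#define BH_DELAY	(1u << 2)
#define BH_FLAGS	(BH_MAPPED | BH_DELAY)	/* bits copied to the extent */

struct extent {			/* stands in for mpd->lbh */
	unsigned long start;	/* logical block where allocation would begin */
	unsigned long nblocks;	/* 0: no unmapped blocks collected (b_size == 0) */
	unsigned state;		/* cf. b_state */
};

/* Stand-in for mpage_da_map_blocks(): decide from ext->state. */
static void map_blocks(const struct extent *ext)
{
	if ((ext->state & BH_MAPPED) && !(ext->state & BH_DELAY)) {
		puts("already mapped, nothing to allocate");
		return;
	}
	printf("allocating at logical block %lu\n", ext->start);
}

/* Stand-in for the buffer_head walk in __mpage_da_writepage(). */
static void walk_page(const unsigned *bufs, int n, unsigned long logical,
		      bool with_fix, struct extent *ext)
{
	for (int i = 0; i < n; i++, logical++) {
		unsigned s = bufs[i];

		if ((s & BH_DIRTY) && (!(s & BH_MAPPED) || (s & BH_DELAY))) {
			if (ext->nblocks == 0) {	/* start a new extent */
				ext->start = logical;
				ext->state = s & BH_FLAGS;
			}
			ext->nblocks++;
		} else if (with_fix && (s & BH_DIRTY) && (s & BH_MAPPED)) {
			/* the patched branch: remember the mapped state */
			if (ext->nblocks == 0)
				ext->state = s & BH_FLAGS;
		}
	}
}

int main(void)
{
	/* four dirty buffers, all already mapped, starting at block 40 */
	unsigned bufs[4] = {
		BH_DIRTY | BH_MAPPED, BH_DIRTY | BH_MAPPED,
		BH_DIRTY | BH_MAPPED, BH_DIRTY | BH_MAPPED,
	};
	struct extent ext;

	ext = (struct extent){ 0, 0, 0 };
	walk_page(bufs, 4, 40, false, &ext);
	printf("without fix: ");
	map_blocks(&ext);	/* allocates at logical block 0 */

	ext = (struct extent){ 0, 0, 0 };
	walk_page(bufs, 4, 40, true, &ext);
	printf("with fix:    ");
	map_blocks(&ext);	/* sees BH_MAPPED and skips allocation */

	return 0;
}

Compiled with any C99 compiler, the walk without the extra branch ends with an empty descriptor and reports an allocation at logical block 0 (the pre-patch symptom), while the walk with it records the mapped state and skips allocation.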
+39 −17
@@ -1653,35 +1653,39 @@ struct mpage_da_data {
  */
 static int mpage_da_submit_io(struct mpage_da_data *mpd)
 {
-	struct address_space *mapping = mpd->inode->i_mapping;
-	int ret = 0, err, nr_pages, i;
-	unsigned long index, end;
-	struct pagevec pvec;
 	long pages_skipped;
+	struct pagevec pvec;
+	unsigned long index, end;
+	int ret = 0, err, nr_pages, i;
+	struct inode *inode = mpd->inode;
+	struct address_space *mapping = inode->i_mapping;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
-	pagevec_init(&pvec, 0);
+	/*
+	 * We need to start from the first_page to the next_page - 1
+	 * to make sure we also write the mapped dirty buffer_heads.
+	 * If we look at mpd->lbh.b_blocknr we would only be looking
+	 * at the currently mapped buffer_heads.
+	 */
 	index = mpd->first_page;
 	end = mpd->next_page - 1;
 
+	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		/*
-		 * We can use PAGECACHE_TAG_DIRTY lookup here because
-		 * even though we have cleared the dirty flag on the page
-		 * We still keep the page in the radix tree with tag
-		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
-		 * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
-		 * which is called via the below writepage callback.
-		 */
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					PAGECACHE_TAG_DIRTY,
-					min(end - index,
-					(pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
+			index = page->index;
+			if (index > end)
+				break;
+			index++;
+
+			BUG_ON(!PageLocked(page));
+			BUG_ON(PageWriteback(page));
+
 			pages_skipped = mpd->wbc->pages_skipped;
 			err = mapping->a_ops->writepage(page, mpd->wbc);
 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
@@ -2095,11 +2099,29 @@ static int __mpage_da_writepage(struct page *page,
 		bh = head;
 		do {
 			BUG_ON(buffer_locked(bh));
+			/*
+			 * We need to try to allocate
+			 * unmapped blocks in the same page.
+			 * Otherwise we won't make progress
+			 * with the page in ext4_da_writepage
+			 */
 			if (buffer_dirty(bh) &&
 				(!buffer_mapped(bh) || buffer_delay(bh))) {
 				mpage_add_bh_to_extent(mpd, logical, bh);
 				if (mpd->io_done)
 					return MPAGE_DA_EXTENT_TAIL;
+			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+				/*
+				 * mapped dirty buffer. We need to update
+				 * the b_state because we look at
+				 * b_state in mpage_da_map_blocks. We don't
+				 * update b_size because if we find an
+				 * unmapped buffer_head later we need to
+				 * use the b_state flag of that buffer_head.
+				 */
+				if (mpd->lbh.b_size == 0)
+					mpd->lbh.b_state =
+						bh->b_state & BH_FLAGS;
 			}
 			logical++;
 		} while ((bh = bh->b_this_page) != head);