Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d0b7da88 authored by Chandan Rajendra, committed by David Sterba
Browse files

Btrfs: btrfs_page_mkwrite: Reserve space in sectorsized units



In subpagesize-blocksize scenario, if i_size occurs in a block which is not
the last block in the page, then the space to be reserved should be calculated
appropriately.

Reviewed-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 9703fefe
Loading
Loading
Loading
Loading
+30 −5
Original line number Diff line number Diff line
@@ -8802,15 +8802,28 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	loff_t size;
	int ret;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_CACHE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepage() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
 	ret = btrfs_delalloc_reserve_space(inode, page_start,
-					   PAGE_CACHE_SIZE);
+					   reserved_space);
	if (!ret) {
		ret = file_update_time(vma->vm_file);
		reserved = 1;
@@ -8844,7 +8857,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
-	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
@@ -8854,6 +8867,18 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
		reserved_space = round_up(size - page_start, root->sectorsize);
		if (reserved_space < PAGE_CACHE_SIZE) {
			end = page_start + reserved_space - 1;
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
			btrfs_delalloc_release_space(inode, page_start,
						PAGE_CACHE_SIZE - reserved_space);
		}
	}

	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
@@ -8861,12 +8886,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
-	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			  EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			  0, 0, &cached_state, GFP_NOFS);

-	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+	ret = btrfs_set_extent_delalloc(inode, page_start, end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
@@ -8905,7 +8930,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	}
	unlock_page(page);
out:
-	btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+	btrfs_delalloc_release_space(inode, page_start, reserved_space);
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	return ret;