Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e78c927 authored by Chandan Rajendra, committed by David Sterba
Browse files

Btrfs: __btrfs_buffered_write: Reserve/release extents aligned to block size



Currently, the code reserves/releases extents in multiples of PAGE_CACHE_SIZE
units. Fix this by doing reservations/releases in block size units.

Signed-off-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e410e34f
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -2353,6 +2353,9 @@ struct btrfs_map_token {
	unsigned long offset;
};

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sb->s_blocksize_bits)

static inline void btrfs_init_map_token (struct btrfs_map_token *token)
{
	token->kaddr = NULL;
+30 −13
Original line number Diff line number Diff line
@@ -498,7 +498,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1379,16 +1379,19 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
static noinline int
lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
	start_pos = round_down(pos, root->sectorsize);
	last_pos = start_pos
		+ round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;

	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
@@ -1503,6 +1506,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
@@ -1511,6 +1515,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;

		WARN_ON(num_pages > nrptrs);

@@ -1523,7 +1529,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
			break;
		}

		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
		sector_offset = pos & (root->sectorsize - 1);
		reserve_bytes = round_up(write_bytes + sector_offset,
				root->sectorsize);

		if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					     BTRFS_INODE_PREALLOC)) {
@@ -1542,7 +1550,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
				 */
				num_pages = DIV_ROUND_UP(write_bytes + offset,
							 PAGE_CACHE_SIZE);
				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
				reserve_bytes = round_up(write_bytes
							+ sector_offset,
							root->sectorsize);
				goto reserve_metadata;
			}
		}
@@ -1576,8 +1586,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
			break;

		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
						      pos, &lockstart, &lockend,
						      &cached_state);
						pos, write_bytes, &lockstart,
						&lockend, &cached_state);
		if (ret < 0) {
			if (ret == -EAGAIN)
				goto again;
@@ -1612,9 +1622,16 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			release_bytes = (num_pages - dirty_pages) <<
				PAGE_CACHE_SHIFT;
		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
						reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					root->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
						dirty_sectors);

		if (num_sectors > dirty_sectors) {
			release_bytes = (write_bytes - copied)
				& ~((u64)root->sectorsize - 1);
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
@@ -1633,7 +1650,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
			}
		}

		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
		release_bytes = round_up(copied + sector_offset,
					root->sectorsize);

		if (copied > 0)
			ret = btrfs_dirty_pages(root, inode, pages,
@@ -1654,8 +1672,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,

		if (only_release_metadata && copied > 0) {
			lockstart = round_down(pos, root->sectorsize);
			lockend = lockstart +
				(dirty_pages << PAGE_CACHE_SHIFT) - 1;
			lockend = round_up(pos + copied, root->sectorsize) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,