
Commit e43bbe5e authored by David Sterba

btrfs: sink unlock_extent parameter gfp_flags



All callers pass either GFP_NOFS or GFP_KERNEL now, so we can sink the
parameter into the function. Although we lose the slightly better
semantics of GFP_KERNEL in some places, it's worth cleaning up the
callchains.

Signed-off-by: David Sterba <dsterba@suse.com>
parent d810a4be
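
To make the API change concrete, here is a minimal userspace sketch of the wrapper before and after the patch. This is an illustration under stated assumptions, not kernel code: the struct bodies, the __clear_extent_bit() stub, the GFP constant values, and the use of unsigned long in place of u64 are placeholders; only the wrapper signatures mirror the patch.

/*
 * Sketch only: placeholder types and a stubbed __clear_extent_bit() so the
 * before/after unlock_extent_cached() signatures can be compared in isolation.
 */
#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_NOFS	0x01u	/* placeholder value */
#define EXTENT_LOCKED	0x02u	/* placeholder value */

struct extent_io_tree { int dummy; };
struct extent_state { int dummy; };

/* stand-in for the real bit-clearing helper */
static int __clear_extent_bit(struct extent_io_tree *tree, unsigned long start,
			      unsigned long end, unsigned bits, int wake,
			      int delete, struct extent_state **cached,
			      gfp_t mask, void *changeset)
{
	printf("clear [%lu, %lu] mask=%#x\n", start, end, mask);
	return 0;
}

/* before the patch: every caller had to pick and pass a gfp mask */
static inline int unlock_extent_cached_old(struct extent_io_tree *tree,
		unsigned long start, unsigned long end,
		struct extent_state **cached, gfp_t mask)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  mask, NULL);
}

/* after the patch: the mask is sunk into the helper, callers just unlock */
static inline int unlock_extent_cached(struct extent_io_tree *tree,
		unsigned long start, unsigned long end,
		struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

int main(void)
{
	struct extent_io_tree tree = { 0 };
	struct extent_state *cached = NULL;

	unlock_extent_cached_old(&tree, 0, 4095, &cached, GFP_NOFS);
	unlock_extent_cached(&tree, 0, 4095, &cached);
	return 0;
}

The effect is visible at the two call sites in main(): after the change, callers no longer choose a gfp mask at each unlock site; the wrapper always uses GFP_NOFS, which is why some callers that previously passed GFP_KERNEL lose that slightly better semantic.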
fs/btrfs/disk-io.c  +1 −1

@@ -381,7 +381,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		clear_extent_buffer_uptodate(eb);
 out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
-			     &cached_state, GFP_NOFS);
+			     &cached_state);
 	if (need_lock)
 		btrfs_tree_read_unlock_blocking(eb);
 	return ret;

fs/btrfs/extent_io.c  +4 −6

@@ -1648,7 +1648,7 @@ STATIC u64 find_lock_delalloc_range(struct inode *inode,
 			     EXTENT_DELALLOC, 1, cached_state);
 	if (!ret) {
 		unlock_extent_cached(tree, delalloc_start, delalloc_end,
-				     &cached_state, GFP_NOFS);
+				     &cached_state);
 		__unlock_for_delalloc(inode, locked_page,
 			      delalloc_start, delalloc_end);
 		cond_resched();
@@ -2941,8 +2941,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
-					     cur + iosize - 1,
-					     &cached, GFP_NOFS);
+					     cur + iosize - 1, &cached);
 			break;
 		}
 		em = __get_extent_map(inode, page, pg_offset, cur,
@@ -3035,8 +3034,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
-					     cur + iosize - 1,
-					     &cached, GFP_NOFS);
+					     cur + iosize - 1, &cached);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -4621,7 +4619,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out:
 	btrfs_free_path(path);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
-			     &cached_state, GFP_NOFS);
+			     &cached_state);
 	return ret;
 }

fs/btrfs/extent_io.h  +2 −2

@@ -312,10 +312,10 @@ static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 }
 
 static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
-		u64 end, struct extent_state **cached, gfp_t mask)
+		u64 end, struct extent_state **cached)
 {
 	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
-				mask, NULL);
+				GFP_NOFS, NULL);
 }
 
 static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,

fs/btrfs/file.c  +7 −8

@@ -1504,7 +1504,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 		    ordered->file_offset + ordered->len > start_pos &&
 		    ordered->file_offset <= last_pos) {
 			unlock_extent_cached(&inode->io_tree, start_pos,
-					last_pos, cached_state, GFP_NOFS);
+					last_pos, cached_state);
 			for (i = 0; i < num_pages; i++) {
 				unlock_page(pages[i]);
 				put_page(pages[i]);
@@ -1758,8 +1758,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 						pos, copied, NULL);
 		if (extents_locked)
 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     lockstart, lockend, &cached_state,
-					     GFP_NOFS);
+					     lockstart, lockend, &cached_state);
 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
 		if (ret) {
 			btrfs_drop_pages(pages, num_pages);
@@ -2600,7 +2599,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
-				     lockend, &cached_state, GFP_NOFS);
+				     lockend, &cached_state);
 		ret = btrfs_wait_ordered_range(inode, lockstart,
 					       lockend - lockstart + 1);
 		if (ret) {
@@ -2751,7 +2750,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	btrfs_free_block_rsv(fs_info, rsv);
 out:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-			     &cached_state, GFP_NOFS);
+			     &cached_state);
 out_only_mutex:
 	if (!updated_inode && truncated_block && !ret && !err) {
 		/*
@@ -2913,7 +2912,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 			btrfs_put_ordered_extent(ordered);
 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 					     alloc_start, locked_end,
-					     &cached_state, GFP_KERNEL);
+					     &cached_state);
 			/*
 			 * we can't wait on the range with the transaction
 			 * running or with the extent lock held
@@ -3015,7 +3014,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	}
 out_unlock:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-			     &cached_state, GFP_KERNEL);
+			     &cached_state);
 out:
 	inode_unlock(inode);
 	/* Let go of our reservation. */
@@ -3088,7 +3087,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 			*offset = min_t(loff_t, start, inode->i_size);
 	}
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-			     &cached_state, GFP_NOFS);
+			     &cached_state);
 	return ret;
 }

fs/btrfs/free-space-cache.c  +2 −3

@@ -1125,8 +1125,7 @@ cleanup_write_cache_enospc(struct inode *inode,
 {
 	io_ctl_drop_pages(io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-			     i_size_read(inode) - 1, cached_state,
-			     GFP_NOFS);
+			     i_size_read(inode) - 1, cached_state);
 }
 
 static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1320,7 +1319,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	io_ctl_drop_pages(io_ctl);
 
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+			     i_size_read(inode) - 1, &cached_state);
 
 	/*
 	 * at this point the pages are under IO and we're happy,