Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b2950863 authored by Christoph Hellwig, committed by Chris Mason
Browse files

Btrfs: make things static and include the right headers



Shut up various sparse warnings about symbols that should be either
static or have their declarations in scope.

Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 1ffa4f42
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -217,7 +217,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 * this uses that block instead of allocating a new one.  btrfs_alloc_reserved_extent
 * is used to finish the allocation.
 */
int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans,
static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
+8 −8
Original line number Diff line number Diff line
@@ -93,8 +93,8 @@ struct async_submit_bio {
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t page_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -295,7 +295,7 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror
 * checksum a dirty tree block before IO.  This has extra checks to make
 * sure we only fill in the checksum field in the first page of a multi-page block
 */
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
@@ -365,7 +365,7 @@ static int check_tree_block_fsid(struct btrfs_root *root,
	return ret;
}

int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	struct extent_io_tree *tree;
@@ -660,7 +660,7 @@ static int btree_writepages(struct address_space *mapping,
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1200,7 +1200,7 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
	}
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct inode *inode;
	struct extent_map_tree *em_tree;
@@ -1842,7 +1842,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
	put_bh(bh);
}

int write_all_supers(struct btrfs_root *root)
static int write_all_supers(struct btrfs_root *root)
{
	struct list_head *cur;
	struct list_head *head = &root->fs_info->fs_devices->devices;
+6 −6
Original line number Diff line number Diff line
@@ -74,7 +74,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
@@ -289,7 +289,7 @@ err:
/*
 * return the block group that starts at or after bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
						       btrfs_fs_info *info,
							 u64 bytenr)
{
@@ -3445,7 +3445,7 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
	return 0;
}

int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
			      u32 *refs)
{
	int ret;
@@ -5434,7 +5434,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
	return flags;
}

int __alloc_chunk_for_shrink(struct btrfs_root *root,
static int __alloc_chunk_for_shrink(struct btrfs_root *root,
		     struct btrfs_block_group_cache *shrink_block_group,
		     int force)
{
@@ -5703,8 +5703,8 @@ out:
	return ret;
}

int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_key *key)
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
+15 −20
Original line number Diff line number Diff line
@@ -112,7 +112,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
}
EXPORT_SYMBOL(extent_io_tree_init);

struct extent_state *alloc_extent_state(gfp_t mask)
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#ifdef LEAK_DEBUG
@@ -136,7 +136,7 @@ struct extent_state *alloc_extent_state(gfp_t mask)
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
static void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
@@ -662,7 +662,7 @@ static void set_state_bits(struct extent_io_tree *tree,
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
@@ -879,12 +879,11 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
@@ -894,27 +893,24 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
@@ -994,7 +990,7 @@ EXPORT_SYMBOL(set_range_dirty);
/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
@@ -1010,7 +1006,6 @@ int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

/*
 * find the first offset in the io tree with 'bits' set. zero is
@@ -1432,11 +1427,13 @@ out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}

#if 0
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
@@ -1473,12 +1470,11 @@ failed:
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
@@ -1493,7 +1489,7 @@ int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);
#endif

/*
 * set the private field for a given byte offset in the tree.  If there isn't
@@ -1956,7 +1952,7 @@ void set_page_extent_mapped(struct page *page)
}
EXPORT_SYMBOL(set_page_extent_mapped);

void set_page_extent_head(struct page *page, unsigned long len)
static void set_page_extent_head(struct page *page, unsigned long len)
{
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}
@@ -2397,7 +2393,7 @@ update_nr_written:
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int extent_write_cache_pages(struct extent_io_tree *tree,
static int extent_write_cache_pages(struct extent_io_tree *tree,
			     struct address_space *mapping,
			     struct writeback_control *wbc,
			     writepage_t writepage, void *data,
@@ -2502,7 +2498,6 @@ retry:
	}
	return ret;
}
EXPORT_SYMBOL(extent_write_cache_pages);

static noinline void flush_write_bio(void *data)
{
+4 −2
Original line number Diff line number Diff line
@@ -443,7 +443,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
	mutex_unlock(&block_group->alloc_mutex);
}

struct btrfs_free_space *btrfs_find_free_space_offset(struct
#if 0
static struct btrfs_free_space *btrfs_find_free_space_offset(struct
						      btrfs_block_group_cache
						      *block_group, u64 offset,
						      u64 bytes)
@@ -458,7 +459,7 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct
	return ret;
}

struct btrfs_free_space *btrfs_find_free_space_bytes(struct
static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
						     btrfs_block_group_cache
						     *block_group, u64 offset,
						     u64 bytes)
@@ -472,6 +473,7 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct

	return ret;
}
#endif

struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
					       *block_group, u64 offset,
Loading