
Commit afd582ac authored by David Sterba

Merge remote-tracking branch 'remotes/josef/for-chris' into btrfs-next-stable

parents c3b92c87 016fc6a6
fs/btrfs/btrfs_inode.h  +9 −8
@@ -103,11 +103,6 @@ struct btrfs_inode {
	 */
	u64 delalloc_bytes;

	/* total number of bytes that may be used for this inode for
	 * delalloc
	 */
	u64 reserved_bytes;

	/*
	 * the size of the file stored in the metadata on disk.  data=ordered
	 * means the in-memory i_size might be larger than the size on disk
@@ -115,9 +110,6 @@ struct btrfs_inode {
	 */
	u64 disk_i_size;

	/* flags field from the on disk inode */
	u32 flags;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
@@ -131,6 +123,15 @@ struct btrfs_inode {
	 */
	u64 last_unlink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums.  This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;

	/*
	 * Counters to keep track of the number of extent items we may use due
	 * to delalloc and such.  outstanding_extents is the number of extent
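
The csum_bytes counter added above feeds the ENOSPC accounting: every outstanding byte of dirty data will eventually need a checksum item in the csum tree, and that metadata space has to be reserved up front and released once the ordered extent completes. A minimal userspace sketch of the idea, not part of this commit (the sector size, csums-per-leaf value and helper name are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE	4096ULL		/* assumed: one csum per 4K sector */
#define CSUMS_PER_LEAF	1000ULL		/* assumed: how many csums fit in one leaf */

/* How many leaves worth of csum items do `bytes` of dirty data imply? */
static uint64_t csum_leaves_for_bytes(uint64_t bytes)
{
	uint64_t csums = (bytes + SECTORSIZE - 1) / SECTORSIZE;

	return (csums + CSUMS_PER_LEAF - 1) / CSUMS_PER_LEAF;
}

int main(void)
{
	uint64_t csum_bytes = 0;	/* per-inode counter, as in the new field */

	csum_bytes += 1 << 20;		/* buffered write of 1MiB: reserve */
	printf("reserve %llu leaves for csums\n",
	       (unsigned long long)csum_leaves_for_bytes(csum_bytes));

	csum_bytes -= 1 << 20;		/* ordered extent finished: release */
	printf("outstanding csum bytes: %llu\n", (unsigned long long)csum_bytes);
	return 0;
}
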
fs/btrfs/ctree.h  +27 −29
@@ -30,6 +30,7 @@
#include <linux/kobject.h>
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <linux/pagemap.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -772,14 +773,8 @@ struct btrfs_space_info {
struct btrfs_block_rsv {
	u64 size;
	u64 reserved;
	u64 freed[2];
	struct btrfs_space_info *space_info;
	struct list_head list;
	spinlock_t lock;
	atomic_t usage;
	unsigned int priority:8;
	unsigned int durable:1;
	unsigned int refill_used:1;
	unsigned int full:1;
};

@@ -840,10 +835,10 @@ struct btrfs_block_group_cache {
	spinlock_t lock;
	u64 pinned;
	u64 reserved;
	u64 reserved_pinned;
	u64 bytes_super;
	u64 flags;
	u64 sectorsize;
	u64 cache_generation;
	unsigned int ro:1;
	unsigned int dirty:1;
	unsigned int iref:1;
@@ -899,6 +894,10 @@ struct btrfs_fs_info {
	spinlock_t block_group_cache_lock;
	struct rb_root block_group_cache_tree;

	/* keep track of unallocated space */
	spinlock_t free_chunk_lock;
	u64 free_chunk_space;

	struct extent_io_tree freed_extents[2];
	struct extent_io_tree *pinned_extents;

@@ -919,11 +918,6 @@ struct btrfs_fs_info {

	struct btrfs_block_rsv empty_block_rsv;

	/* list of block reservations that cross multiple transactions */
	struct list_head durable_block_rsv_list;

	struct mutex durable_block_rsv_mutex;

	u64 generation;
	u64 last_trans_committed;

@@ -2129,6 +2123,11 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
		(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
}

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_mask(mapping) & ~__GFP_FS;
}

/* extent-tree.c */
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
						 unsigned num_items)
@@ -2137,6 +2136,17 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
		3 * num_items;
}

/*
 * Doing a truncate won't result in new nodes or leaves, just what we need for
 * COW.
 */
static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
						 unsigned num_items)
{
	return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
		num_items;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count);
@@ -2196,8 +2206,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      u64 root_objectid, u64 owner, u64 offset);

int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				u64 num_bytes, int reserve, int sinfo);
int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
@@ -2240,25 +2248,20 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root);
void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv);
void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv,
			u64 num_bytes);
int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv,
			  u64 min_reserved, int min_factor);
			  u64 min_reserved);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes);
void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes);
int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_block_rsv *rsv);
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache);
int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2579,11 +2582,6 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans,
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending,
				u64 *bytes_to_reserve);
void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
				struct btrfs_pending_snapshot *pending);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
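
The new btrfs_calc_trunc_metadata_size() in this file drops the factor of three used by btrfs_calc_trans_metadata_size(): a truncate only COWs nodes and leaves that already exist, so one tree path's worth of metadata per item is enough. A standalone sketch of the two formulas, assuming 4K leaves and nodes (the real sizes come from the root):

#include <stdint.h>
#include <stdio.h>

#define BTRFS_MAX_LEVEL 8	/* matches the kernel definition */

static uint64_t calc_trans_metadata_size(uint64_t leafsize, uint64_t nodesize,
					 unsigned num_items)
{
	/* a normal transaction item may allocate new nodes, hence the 3x */
	return (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3 * num_items;
}

static uint64_t calc_trunc_metadata_size(uint64_t leafsize, uint64_t nodesize,
					 unsigned num_items)
{
	/* truncate only COWs what is already there */
	return (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * num_items;
}

int main(void)
{
	uint64_t leafsize = 4096, nodesize = 4096;	/* assumed defaults */

	printf("trans reservation: %llu bytes per item\n",
	       (unsigned long long)calc_trans_metadata_size(leafsize, nodesize, 1));
	printf("trunc reservation: %llu bytes per item\n",
	       (unsigned long long)calc_trunc_metadata_size(leafsize, nodesize, 1));
	return 0;
}

The other helper added in the same hunk, btrfs_alloc_write_mask(), simply masks __GFP_FS out of the mapping's allocation flags so that page allocations issued on the write path cannot recurse back into the filesystem.
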
fs/btrfs/disk-io.c  +4 −4
@@ -1648,6 +1648,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->free_chunk_lock);
	mutex_init(&fs_info->reloc_mutex);

	init_completion(&fs_info->kobj_unregister);
@@ -1665,8 +1666,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	btrfs_init_block_rsv(&fs_info->trans_block_rsv);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv);
	INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
	mutex_init(&fs_info->durable_block_rsv_mutex);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
@@ -1677,6 +1676,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	fs_info->trans_no_join = 0;
	fs_info->free_chunk_space = 0;

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);
@@ -2545,8 +2545,6 @@ int close_ctree(struct btrfs_root *root)
	/* clear out the rbtree of defraggable inodes */
	btrfs_run_defrag_inodes(root->fs_info);

	btrfs_put_block_group_cache(fs_info);

	/*
	 * Here come 2 situations when btrfs is broken to flip readonly:
	 *
@@ -2572,6 +2570,8 @@ int close_ctree(struct btrfs_root *root)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	btrfs_put_block_group_cache(fs_info);

	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);
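
open_ctree() now initializes the free_chunk_lock and free_chunk_space fields that keep a running total of device space not yet allocated to any chunk. A cut-down userspace model of that bookkeeping, with a pthread mutex standing in for the kernel spinlock (the helper names and call sites are hypothetical):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fs_info_stub {
	pthread_mutex_t free_chunk_lock;	/* spinlock in the kernel */
	uint64_t free_chunk_space;		/* bytes not yet in any chunk */
};

/* Grow the counter when device space appears ... */
static void add_device_space(struct fs_info_stub *fs, uint64_t bytes)
{
	pthread_mutex_lock(&fs->free_chunk_lock);
	fs->free_chunk_space += bytes;
	pthread_mutex_unlock(&fs->free_chunk_lock);
}

/* ... and shrink it when a chunk is carved out of the unallocated area. */
static void alloc_chunk_space(struct fs_info_stub *fs, uint64_t bytes)
{
	pthread_mutex_lock(&fs->free_chunk_lock);
	fs->free_chunk_space -= bytes;
	pthread_mutex_unlock(&fs->free_chunk_lock);
}

int main(void)
{
	struct fs_info_stub fs = { PTHREAD_MUTEX_INITIALIZER, 0 };

	add_device_space(&fs, 10ULL << 30);	/* 10 GiB device added */
	alloc_chunk_space(&fs, 1ULL << 30);	/* 1 GiB chunk allocated */
	printf("unallocated: %llu bytes\n",
	       (unsigned long long)fs.free_chunk_space);
	return 0;
}
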

+364 −304

File changed; diff collapsed (preview size limit exceeded).

fs/btrfs/extent_io.c  +193 −1
@@ -894,6 +894,194 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
	goto again;
}

/**
 * convert_extent_bit - convert all bits in a given range from one bit to another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @mask:	the allocation mask
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       int bits, int clear_bits, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			return -ENOMEM;
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;

		set_state_bits(tree, state, &bits);
		clear_state_bit(tree, state, &clear_bits, 0);

		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			return -ENOMEM;
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			clear_state_bit(tree, state, &clear_bits, 0);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			return -ENOMEM;

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		BUG_ON(err == -EEXIST);
		if (err) {
			free_extent_state(prealloc);
			prealloc = NULL;
			goto out;
		}
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			return -ENOMEM;

		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, &bits);
		clear_state_bit(tree, prealloc, &clear_bits, 0);

		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
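
The hunk above adds convert_extent_bit(), which swaps one set of state bits for another over a byte range in a single pass (for example DELALLOC to DIRTY) instead of a separate clear followed by a set. A tiny userspace model of that semantic over a flat array of states, not the real rb-tree implementation (the flag values and fixed-size ranges are illustrative):

#include <stdio.h>

#define EXTENT_DIRTY	(1u << 0)	/* illustrative flag values */
#define EXTENT_DELALLOC	(1u << 1)

#define NSTATES 8	/* pretend each slot covers one fixed-size range */

static unsigned state[NSTATES];

/* Convert semantic: for every state overlapping [start, end],
 * set `bits` and clear `clear_bits` in the same step. */
static void convert_range(unsigned start, unsigned end,
			  unsigned bits, unsigned clear_bits)
{
	for (unsigned i = start; i <= end && i < NSTATES; i++) {
		state[i] |= bits;
		state[i] &= ~clear_bits;
	}
}

int main(void)
{
	for (unsigned i = 2; i <= 5; i++)
		state[i] = EXTENT_DELALLOC;	/* pending delalloc range */

	/* at writeback time: DELALLOC becomes DIRTY in one step */
	convert_range(2, 5, EXTENT_DIRTY, EXTENT_DELALLOC);

	for (unsigned i = 0; i < NSTATES; i++)
		printf("%u:%#x ", i, state[i]);
	printf("\n");
	return 0;
}

The real function additionally splits and merges extent_state records so the converted bits line up exactly with the requested [start, end] range.
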
@@ -2136,6 +2324,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
	int compressed;
	int write_flags;
	unsigned long nr_written = 0;
	bool fill_delalloc = true;

	if (wbc->sync_mode == WB_SYNC_ALL)
		write_flags = WRITE_SYNC;
@@ -2166,10 +2355,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,

	set_page_extent_mapped(page);

	if (!tree->ops || !tree->ops->fill_delalloc)
		fill_delalloc = false;

	delalloc_start = start;
	delalloc_end = 0;
	page_started = 0;
	if (!epd->extent_locked) {
	if (!epd->extent_locked && fill_delalloc) {
		u64 delalloc_to_write = 0;
		/*
		 * make sure the wbc mapping index is at least updated