
Commit 3c25fa74 authored by Linus Torvalds
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: use join_transaction in btrfs_evict_inode()
  Btrfs - use %pU to print fsid
  Btrfs: fix extent state leak on failed nodatasum reads
  btrfs: fix unlocked access of delalloc_inodes
  Btrfs: avoid stack bloat in btrfs_ioctl_fs_info()
  btrfs: remove 64bit alignment padding to allow extent_buffer to fit into one fewer cacheline
  Btrfs: clear current->journal_info on async transaction commit
  Btrfs: make sure to recheck for bitmaps in clusters
  btrfs: remove unneeded includes from scrub.c
  btrfs: reinitialize scrub workers
  btrfs: scrub: errors in tree enumeration
  Btrfs: don't map extent buffer if path->skip_locking is set
  Btrfs: unlock the trans lock properly
  Btrfs: don't map extent buffer if path->skip_locking is set
  Btrfs: fix duplicate checking logic
  Btrfs: fix the allocator loop logic
  Btrfs: fix bitmap regression
  Btrfs: don't commit the transaction if we don't have enough pinned bytes
  Btrfs: noinline the cluster searching functions
  Btrfs: cache bitmaps when searching for a cluster
parents 9d6fa8fa 30b4caf5
fs/btrfs/ctree.c  +7 −3
@@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root,
 	u32 nr;
 	u32 blocksize;
 	u32 nscan = 0;
+	bool map = true;
 
 	if (level != 1)
 		return;
@@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root,

 	nritems = btrfs_header_nritems(node);
 	nr = slot;
+	if (node->map_token || path->skip_locking)
+		map = false;
 
 	while (1) {
-		if (!node->map_token) {
+		if (map && !node->map_token) {
 			unsigned long offset = btrfs_node_key_ptr_offset(nr);
 			map_private_extent_buffer(node, offset,
 						  sizeof(struct btrfs_key_ptr),
@@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((search <= target && target - search <= 65536) ||
 		    (search > target && search - target <= 65536)) {
 			gen = btrfs_node_ptr_generation(node, nr);
-			if (node->map_token) {
+			if (map && node->map_token) {
 				unmap_extent_buffer(node, node->map_token,
 						    KM_USER1);
 				node->map_token = NULL;
@@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((nread > 65536 || nscan > 32))
 			break;
 	}
-	if (node->map_token) {
+	if (map && node->map_token) {
 		unmap_extent_buffer(node, node->map_token, KM_USER1);
 		node->map_token = NULL;
 	}
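
The fix above computes the mapping decision once, up front, and then applies the same guard at every map and unmap site, so the pair stays balanced even when mapping must be skipped. A standalone sketch of that guard pattern, using hypothetical names rather than the btrfs/kernel API:

#include <stdbool.h>
#include <stdio.h>

struct buffer {
	void *map_token;	/* non-NULL while a mapping is held */
	bool skip_locking;	/* caller asked us not to map at all */
};

static void map_buffer(struct buffer *b)   { b->map_token = b; }
static void unmap_buffer(struct buffer *b) { b->map_token = NULL; }

static void scan(struct buffer *b, int iterations)
{
	/* Decide once whether mapping is allowed on this pass. */
	bool map = true;

	if (b->map_token || b->skip_locking)
		map = false;

	for (int i = 0; i < iterations; i++) {
		if (map && !b->map_token)
			map_buffer(b);
		/* ... read keys out of the mapped buffer here ... */
	}
	/* The release site uses the exact same guard as the acquire site. */
	if (map && b->map_token)
		unmap_buffer(b);
}

int main(void)
{
	struct buffer b = { .map_token = NULL, .skip_locking = true };
	scan(&b, 4);			/* mapping skipped entirely */
	b.skip_locking = false;
	scan(&b, 4);			/* mapping taken and released */
	printf("token at exit: %p\n", b.map_token);
	return 0;
}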
fs/btrfs/disk-io.c  +1 −4
@@ -1668,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	init_waitqueue_head(&fs_info->scrub_pause_wait);
 	init_rwsem(&fs_info->scrub_super_lock);
 	fs_info->scrub_workers_refcnt = 0;
-	btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-			   fs_info->thread_pool_size, &fs_info->generic_worker);
 
 	sb->s_blocksize = 4096;
 	sb->s_blocksize_bits = blksize_bits(4096);
@@ -2911,9 +2909,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)

 	INIT_LIST_HEAD(&splice);
 
-	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
-
 	spin_lock(&root->fs_info->delalloc_lock);
+	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
 
 	while (!list_empty(&splice)) {
 		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
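
The delalloc fix is a lock-ordering repair: the shared list must be spliced onto the private head while holding the same lock the writers take, otherwise the splice races with concurrent insertions. A minimal userspace analogy, with hypothetical names and a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static struct node *shared_head;	/* shared, lock-protected list */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer side: insertions always happen under shared_lock. */
static void add_node(struct node *n)
{
	pthread_mutex_lock(&shared_lock);
	n->next = shared_head;
	shared_head = n;
	pthread_mutex_unlock(&shared_lock);
}

/* Reader side, mirroring the fixed ordering in the hunk above:
 * take the lock FIRST, then detach the whole list onto a private head. */
static struct node *drain_nodes(void)
{
	pthread_mutex_lock(&shared_lock);
	struct node *splice = shared_head;	/* list_splice_init() analogue */
	shared_head = NULL;
	pthread_mutex_unlock(&shared_lock);
	return splice;				/* safe to walk unlocked now */
}

int main(void)
{
	struct node a = {0}, b = {0};
	add_node(&a);
	add_node(&b);
	int count = 0;
	for (struct node *n = drain_nodes(); n; n = n->next)
		count++;
	printf("drained %d nodes\n", count);
	return 0;
}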
fs/btrfs/extent-tree.c  +32 −23
@@ -3089,6 +3089,13 @@ alloc:
 			}
 			goto again;
 		}
+
+		/*
+		 * If we have less pinned bytes than we want to allocate then
+		 * don't bother committing the transaction, it won't help us.
+		 */
+		if (data_sinfo->bytes_pinned < bytes)
+			committed = 1;
 		spin_unlock(&data_sinfo->lock);
 
 		/* commit the current transaction and try again */
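
The added comment reduces to a small predicate: a transaction commit can return at most bytes_pinned to the free pool, so a larger request cannot be satisfied by committing. A hedged sketch of that check, with made-up numbers and hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Committing frees at most bytes_pinned, so skip it if that is not enough. */
static bool commit_can_help(uint64_t bytes_pinned, uint64_t bytes_needed)
{
	return bytes_pinned >= bytes_needed;
}

int main(void)
{
	/* 1 MiB pinned cannot satisfy a 4 MiB allocation: skip the commit. */
	printf("%d\n", commit_can_help(1 << 20, 4 << 20));	/* 0 */
	printf("%d\n", commit_can_help(8 << 20, 4 << 20));	/* 1 */
	return 0;
}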
@@ -5211,9 +5218,7 @@ loop:
 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
 	 *			again
 	 */
-	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
-	    (found_uncached_bg || empty_size || empty_cluster ||
-	     allowed_chunk_alloc)) {
+	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
 		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 			found_uncached_bg = false;
@@ -5253,32 +5258,36 @@ loop:
 			goto search;
 		}
 
-		if (loop < LOOP_CACHING_WAIT) {
-			loop++;
-			goto search;
-		}
+		loop++;
 
 		if (loop == LOOP_ALLOC_CHUNK) {
-			empty_size = 0;
-			empty_cluster = 0;
-		}
-
 		       if (allowed_chunk_alloc) {
 				ret = do_chunk_alloc(trans, root, num_bytes +
 						     2 * 1024 * 1024, data,
 						     CHUNK_ALLOC_LIMITED);
 				allowed_chunk_alloc = 0;
+				if (ret == 1)
+					done_chunk_alloc = 1;
 			} else if (!done_chunk_alloc &&
-			   space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+				   space_info->force_alloc ==
+				   CHUNK_ALLOC_NO_FORCE) {
 				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
 			}
 
-		if (loop < LOOP_NO_EMPTY_SIZE) {
-			loop++;
-			goto search;
+		       /*
+			* We didn't allocate a chunk, go ahead and drop the
+			* empty size and loop again.
+			*/
+		       if (!done_chunk_alloc)
+			       loop = LOOP_NO_EMPTY_SIZE;
 		}
-		ret = -ENOSPC;
+
+		if (loop == LOOP_NO_EMPTY_SIZE) {
+			empty_size = 0;
+			empty_cluster = 0;
+		}
+
+		goto search;
+	} else if (!ins->objectid) {
+		ret = -ENOSPC;
 	} else if (ins->objectid) {
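
The rework above turns the allocator's fallback chain into an unconditional loop++ through escalating stages, with the final stage dropping the slack requirements before one last retry. A simplified, self-contained model of that staged-retry shape (not the real allocator; the names only loosely mirror the LOOP_* constants):

#include <stdbool.h>
#include <stdio.h>

enum loop_stage {
	LOOP_CACHING_NOWAIT,	/* only already-cached block groups */
	LOOP_CACHING_WAIT,	/* wait for caching to make progress */
	LOOP_ALLOC_CHUNK,	/* try to allocate a brand-new chunk */
	LOOP_NO_EMPTY_SIZE,	/* last resort: drop the slack and retry */
};

/* Stand-in for a search pass; pretend only the relaxed pass succeeds. */
static bool try_alloc(enum loop_stage stage, unsigned empty_size)
{
	return stage == LOOP_NO_EMPTY_SIZE && empty_size == 0;
}

int main(void)
{
	unsigned empty_size = 4096;	/* extra slack we would like */
	enum loop_stage loop = LOOP_CACHING_NOWAIT;

	for (;;) {
		if (try_alloc(loop, empty_size)) {
			printf("allocated at stage %d\n", loop);
			return 0;
		}
		if (loop == LOOP_NO_EMPTY_SIZE)
			break;			/* out of fallbacks: ENOSPC */
		loop++;
		/* Entering the final stage drops the slack, mirroring
		 * empty_size = empty_cluster = 0 in the hunk above. */
		if (loop == LOOP_NO_EMPTY_SIZE)
			empty_size = 0;
	}
	printf("ENOSPC\n");
	return 1;
}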
fs/btrfs/extent_io.h  +1 −1
@@ -126,9 +126,9 @@ struct extent_buffer {
 	unsigned long map_len;
 	struct page *first_page;
 	unsigned long bflags;
+	atomic_t refs;
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
-	atomic_t refs;
 
 	/* the spinlock is used to protect most operations */
 	spinlock_t lock;
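
Moving the 4-byte atomic_t next to other members that are not 8-byte aligned removes an alignment hole, which is what lets extent_buffer fit in one fewer cacheline. A generic illustration of why member order changes sizeof on LP64 targets (illustrative structs, not the real extent_buffer layout):

#include <stdio.h>

/* Two 4-byte fields interleaved with 8-byte fields: each one drags in
 * 4 bytes of alignment padding before the next 8-byte member. */
struct interleaved {
	long a;		/* 8 bytes */
	int r;		/* 4 bytes + 4 bytes padding */
	long b;		/* 8 bytes */
	int s;		/* 4 bytes + 4 bytes tail padding */
};

/* Same fields with the 4-byte members adjacent: they share one slot. */
struct reordered {
	long a;		/* 8 bytes */
	long b;		/* 8 bytes */
	int r;		/* 4 bytes */
	int s;		/* 4 bytes, packed against r */
};

int main(void)
{
	/* Typically prints 32 vs 24 on LP64 targets. */
	printf("interleaved: %zu bytes\n", sizeof(struct interleaved));
	printf("reordered:   %zu bytes\n", sizeof(struct reordered));
	return 0;
}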
fs/btrfs/free-space-cache.c  +130 −33
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
 	int num_checksums;
-	int ret = 0, ret2;
+	int ret = 0;
 
 	INIT_LIST_HEAD(&bitmaps);

@@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 					goto free_cache;
 				}
 				spin_lock(&ctl->tree_lock);
-				ret2 = link_free_space(ctl, e);
+				ret = link_free_space(ctl, e);
 				ctl->total_bitmaps++;
 				ctl->op->recalc_thresholds(ctl);
 				spin_unlock(&ctl->tree_lock);
-				list_add_tail(&e->list, &bitmaps);
 				if (ret) {
 					printk(KERN_ERR "Duplicate entries in "
 					       "free space cache, dumping\n");
@@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 					page_cache_release(page);
 					goto free_cache;
 				}
+				list_add_tail(&e->list, &bitmaps);
 			}
 
 			num_entries--;
@@ -1417,6 +1417,23 @@ again:
 	return 0;
 }
 
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+			       struct btrfs_free_space *info, u64 offset,
+			       u64 bytes)
+{
+	u64 bytes_to_set = 0;
+	u64 end;
+
+	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+	bytes_to_set = min(end - offset, bytes);
+
+	bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+	return bytes_to_set;
+
+}
+
 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		      struct btrfs_free_space *info)
 {
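
add_bytes_to_bitmap() clamps a range to the window one bitmap covers and reports how much it consumed, so callers can advance the offset and retry in the next bitmap. A standalone sketch of that clamp-and-advance arithmetic (the window size here is an assumption, not the btrfs value):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BITMAP_WINDOW (1u << 17)	/* bytes one bitmap covers (assumed) */

/* Returns how many bytes of [offset, offset + bytes) fit in the bitmap
 * whose coverage starts at bitmap_start. */
static uint64_t add_to_bitmap(uint64_t bitmap_start, uint64_t offset,
			      uint64_t bytes)
{
	uint64_t end = bitmap_start + BITMAP_WINDOW;
	uint64_t bytes_to_set = (end - offset < bytes) ? end - offset : bytes;
	/* real code would set bits for [offset, offset + bytes_to_set) */
	return bytes_to_set;
}

int main(void)
{
	uint64_t offset = BITMAP_WINDOW - 4096;	/* 4 KiB before the boundary */
	uint64_t bytes = 16384;			/* 16 KiB: crosses the boundary */

	/* First pass sets the 4 KiB tail of one bitmap, second pass sets
	 * the remaining 12 KiB at the start of the next one. */
	while (bytes) {
		uint64_t start = offset - (offset % BITMAP_WINDOW);
		uint64_t added = add_to_bitmap(start, offset, bytes);
		printf("set %" PRIu64 " bytes at %" PRIu64 "\n", added, offset);
		offset += added;
		bytes -= added;
	}
	return 0;
}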
@@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return true;
 }
 
+static struct btrfs_free_space_op free_space_op = {
+	.recalc_thresholds	= recalculate_thresholds,
+	.use_bitmap		= use_bitmap,
+};
+
 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info)
 {
 	struct btrfs_free_space *bitmap_info;
+	struct btrfs_block_group_cache *block_group = NULL;
 	int added = 0;
-	u64 bytes, offset, end;
+	u64 bytes, offset, bytes_added;
 	int ret;
 
 	bytes = info->bytes;
@@ -1467,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 	if (!ctl->op->use_bitmap(ctl, info))
 		return 0;
 
+	if (ctl->op == &free_space_op)
+		block_group = ctl->private;
 again:
+	/*
+	 * Since we link bitmaps right into the cluster we need to see if we
+	 * have a cluster here, and if so and it has our bitmap we need to add
+	 * the free space to that bitmap.
+	 */
+	if (block_group && !list_empty(&block_group->cluster_list)) {
+		struct btrfs_free_cluster *cluster;
+		struct rb_node *node;
+		struct btrfs_free_space *entry;
+
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+		spin_lock(&cluster->lock);
+		node = rb_first(&cluster->root);
+		if (!node) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		if (!entry->bitmap) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		if (entry->offset == offset_to_bitmap(ctl, offset)) {
+			bytes_added = add_bytes_to_bitmap(ctl, entry,
+							  offset, bytes);
+			bytes -= bytes_added;
+			offset += bytes_added;
+		}
+		spin_unlock(&cluster->lock);
+		if (!bytes) {
+			ret = 1;
+			goto out;
+		}
+	}
+
+no_cluster_bitmap:
 	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					 1, 0);
 	if (!bitmap_info) {
@@ -1475,19 +1540,10 @@ again:
 		goto new_bitmap;
 	}
 
-	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
-	if (offset >= bitmap_info->offset && offset + bytes > end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
-		bytes -= end - offset;
-		offset = end;
+	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+	bytes -= bytes_added;
+	offset += bytes_added;
 	added = 0;
-	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
-		bytes = 0;
-	} else {
-		BUG();
-	}
 
 	if (!bytes) {
 		ret = 1;
@@ -1766,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
	       "\n", count);
}

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2142,9 +2193,11 @@ again:
 /*
  * This searches the block group for just extents to fill the cluster with.
  */
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
-				   struct btrfs_free_cluster *cluster,
-				   u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_cluster *cluster,
+			struct list_head *bitmaps, u64 offset, u64 bytes,
+			u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *first = NULL;
@@ -2166,6 +2219,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 	 * extent entry.
 	 */
 	while (entry->bitmap) {
+		if (list_empty(&entry->list))
+			list_add_tail(&entry->list, bitmaps);
 		node = rb_next(&entry->offset_index);
 		if (!node)
 			return -ENOSPC;
@@ -2185,8 +2240,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 			return -ENOSPC;
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
-		if (entry->bitmap)
+		if (entry->bitmap) {
+			if (list_empty(&entry->list))
+				list_add_tail(&entry->list, bitmaps);
 			continue;
+		}
 
 		/*
 		 * we haven't filled the empty size and the window is
 		 * very large.  reset and try again
@@ -2238,9 +2297,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
  * This specifically looks for bitmaps that may work in the cluster, we assume
  * that we have already failed to find extents that will work.
  */
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
-				struct btrfs_free_cluster *cluster,
-				u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+		     struct btrfs_free_cluster *cluster,
+		     struct list_head *bitmaps, u64 offset, u64 bytes,
+		     u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
@@ -2250,10 +2311,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
+	/*
+	 * First check our cached list of bitmaps and see if there is an entry
+	 * here that will work.
+	 */
+	list_for_each_entry(entry, bitmaps, list) {
+		if (entry->bytes < min_bytes)
+			continue;
+		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+					   bytes, min_bytes);
+		if (!ret)
+			return 0;
+	}
+
+	/*
+	 * If we do have entries on our list and we are here then we didn't find
+	 * anything, so go ahead and get the next entry after the last entry in
+	 * this list and start the search from there.
+	 */
+	if (!list_empty(bitmaps)) {
+		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+				   list);
+		node = rb_next(&entry->offset_index);
+		if (!node)
+			return -ENOSPC;
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		goto search;
+	}
+
 	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
 	if (!entry)
 		return -ENOSPC;
 
+search:
 	node = &entry->offset_index;
 	do {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
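
Both setup passes share one idea: the extent pass has to walk past bitmap entries anyway, so it records them on a temporary list, and the bitmap pass then consults that cache before searching the tree again. A compact illustration of the same caching pattern with hypothetical structures:

#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool is_bitmap;
	unsigned bytes;
	struct entry *next_seen;	/* side list, like the bitmaps list */
};

static struct entry *try_extents(struct entry *all, int n,
				 struct entry **seen_bitmaps)
{
	for (int i = 0; i < n; i++) {
		if (all[i].is_bitmap) {
			/* Skipped here, but remembered for the fallback. */
			all[i].next_seen = *seen_bitmaps;
			*seen_bitmaps = &all[i];
			continue;
		}
		if (all[i].bytes >= 4096)
			return &all[i];
	}
	return NULL;
}

static struct entry *try_bitmaps(struct entry *seen)
{
	/* Fallback pass: check the cached bitmaps first. */
	for (struct entry *e = seen; e; e = e->next_seen)
		if (e->bytes >= 4096)
			return e;
	return NULL;	/* real code would continue from the tree here */
}

int main(void)
{
	struct entry space[3] = {
		{ .is_bitmap = false, .bytes = 512  },
		{ .is_bitmap = true,  .bytes = 8192 },
		{ .is_bitmap = false, .bytes = 1024 },
	};
	struct entry *bitmaps = NULL;
	struct entry *hit = try_extents(space, 3, &bitmaps);
	if (!hit)
		hit = try_bitmaps(bitmaps);
	printf("found entry with %u bytes\n", hit ? hit->bytes : 0);
	return 0;
}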
@@ -2284,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct list_head bitmaps;
+	struct btrfs_free_space *entry, *tmp;
 	u64 min_bytes;
 	int ret;

@@ -2322,11 +2414,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
-				      min_bytes);
-	if (ret)
-		ret = setup_cluster_bitmap(block_group, cluster, offset,
-					   bytes, min_bytes);
+	INIT_LIST_HEAD(&bitmaps);
+	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+				      bytes, min_bytes);
+	if (ret)
+		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+					   offset, bytes, min_bytes);
+
+	/* Clear our temporary list */
+	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+		list_del_init(&entry->list);
 
 	if (!ret) {
 		atomic_inc(&block_group->count);