
Commit 902b22f3 authored by David Woodhouse, committed by Chris Mason

Btrfs: Remove broken optimisations in end_bio functions.



These ended up freeing objects while they were still using them. Under
guidance from Chris, just rip out the 'clever' bits and do things the
simple way.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 53863232
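For readers skimming the change: the old completion paths cached an extent_state pointer in bio->bi_private and walked the extent tree from it, even though the end_io hooks could already have freed that state; the new code simply re-derives the io tree from each page's inode (tree = &BTRFS_I(page->mapping->host)->io_tree) and clears the range with the ordinary helpers. The fragment below is a minimal user-space sketch of that idea only, not btrfs code; every name in it is hypothetical. It illustrates why holding a cached pointer to per-range state that a completion hook may free is a use-after-free, and how re-deriving the long-lived tree from a stable owner avoids it.

/* Hypothetical analogy of the "simple way" taken by this commit:
 * never touch the per-range state after the hook that may free it;
 * fetch the long-lived tree from its owner instead. */
#include <stdio.h>
#include <stdlib.h>

struct io_tree { int pending; };          /* lives as long as the owner  */
struct owner   { struct io_tree tree; };  /* stands in for the inode     */

struct range_state {                      /* stands in for extent_state  */
	struct io_tree *tree;
	long end;
};

/* Completion hook that legitimately releases the per-range state. */
static void completion_hook(struct range_state *state)
{
	free(state);
}

static void end_io_simple(struct owner *owner, struct range_state *state)
{
	/* Look the tree up from the stable owner, so nothing the hook
	 * frees is dereferenced afterwards. */
	struct io_tree *tree = &owner->tree;

	completion_hook(state);   /* state may be gone from here on  */
	tree->pending--;          /* still safe: tree lives in owner */
}

int main(void)
{
	struct owner owner = { .tree = { .pending = 1 } };
	struct range_state *state = malloc(sizeof(*state));

	if (!state)
		return 1;
	state->tree = &owner.tree;
	state->end = 4095;

	end_io_simple(&owner, state);
	printf("pending after completion: %d\n", owner.tree.pending);
	return 0;
}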
+21 −138
@@ -15,6 +15,8 @@
#include "extent_io.h"
#include "extent_io.h"
#include "extent_map.h"
#include "extent_map.h"
#include "compat.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"


/* temporary define until extent_map moves out of btrfs */
/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
@@ -1394,15 +1396,11 @@ static int end_bio_extent_writepage(struct bio *bio,
 {
 	int uptodate = err == 0;
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_state *state = bio->bi_private;
-	struct extent_io_tree *tree = state->tree;
-	struct rb_node *node;
+	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
-	u64 cur;
 	int whole_page;
 	int ret;
-	unsigned long flags;
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
 	if (bio->bi_size)
@@ -1410,6 +1408,8 @@ static int end_bio_extent_writepage(struct bio *bio,
 #endif
 	do {
 		struct page *page = bvec->bv_page;
+		tree = &BTRFS_I(page->mapping->host)->io_tree;
+
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
 			 bvec->bv_offset;
 		end = start + bvec->bv_len - 1;
@@ -1423,7 +1423,7 @@ static int end_bio_extent_writepage(struct bio *bio,
 			prefetchw(&bvec->bv_page->flags);
 		if (tree->ops && tree->ops->writepage_end_io_hook) {
 			ret = tree->ops->writepage_end_io_hook(page, start,
-						       end, state, uptodate);
+						       end, NULL, uptodate);
 			if (ret)
 				uptodate = 0;
 		}
@@ -1431,9 +1431,8 @@ static int end_bio_extent_writepage(struct bio *bio,
 		if (!uptodate && tree->ops &&
 		    tree->ops->writepage_io_failed_hook) {
 			ret = tree->ops->writepage_io_failed_hook(bio, page,
-							 start, end, state);
+							 start, end, NULL);
 			if (ret == 0) {
-				state = NULL;
 				uptodate = (err == 0);
 				continue;
 			}
@@ -1445,68 +1444,7 @@ static int end_bio_extent_writepage(struct bio *bio,
 			SetPageError(page);
 		}
 
-		/*
-		 * bios can get merged in funny ways, and so we need to
-		 * be careful with the state variable.  We know the
-		 * state won't be merged with others because it has
-		 * WRITEBACK set, but we can't be sure each biovec is
-		 * sequential in the file.  So, if our cached state
-		 * doesn't match the expected end, search the tree
-		 * for the correct one.
-		 */
-
-		spin_lock_irqsave(&tree->lock, flags);
-		if (!state || state->end != end) {
-			state = NULL;
-			node = __etree_search(tree, start, NULL, NULL);
-			if (node) {
-				state = rb_entry(node, struct extent_state,
-						 rb_node);
-				if (state->end != end ||
-				    !(state->state & EXTENT_WRITEBACK))
-					state = NULL;
-			}
-			if (!state) {
-				spin_unlock_irqrestore(&tree->lock, flags);
-				clear_extent_writeback(tree, start,
-						       end, GFP_ATOMIC);
-				goto next_io;
-			}
-		}
-		cur = end;
-		while(1) {
-			struct extent_state *clear = state;
-			cur = state->start;
-			node = rb_prev(&state->rb_node);
-			if (node) {
-				state = rb_entry(node,
-						 struct extent_state,
-						 rb_node);
-			} else {
-				state = NULL;
-			}
-
-			clear_state_bit(tree, clear, EXTENT_WRITEBACK,
-					1, 0);
-			if (cur == start)
-				break;
-			if (cur < start) {
-				WARN_ON(1);
-				break;
-			}
-			if (!node)
-				break;
-		}
-		/* before releasing the lock, make sure the next state
-		 * variable has the expected bits set and corresponds
-		 * to the correct offsets in the file
-		 */
-		if (state && (state->end + 1 != start ||
-		    !(state->state & EXTENT_WRITEBACK))) {
-			state = NULL;
-		}
-		spin_unlock_irqrestore(&tree->lock, flags);
-next_io:
+		clear_extent_writeback(tree, start, end, GFP_ATOMIC);
 
 		if (whole_page)
 			end_page_writeback(page);
@@ -1539,13 +1477,9 @@ static int end_bio_extent_readpage(struct bio *bio,
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_state *state = bio->bi_private;
-	struct extent_io_tree *tree = state->tree;
-	struct rb_node *node;
+	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
-	u64 cur;
-	unsigned long flags;
 	int whole_page;
 	int ret;
 
@@ -1556,6 +1490,8 @@ static int end_bio_extent_readpage(struct bio *bio,
 
 	do {
 		struct page *page = bvec->bv_page;
+		tree = &BTRFS_I(page->mapping->host)->io_tree;
+
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
 			bvec->bv_offset;
 		end = start + bvec->bv_len - 1;
@@ -1570,80 +1506,26 @@ static int end_bio_extent_readpage(struct bio *bio,
 
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
-							      state);
+							      NULL);
 			if (ret)
 				uptodate = 0;
 		}
 		if (!uptodate && tree->ops &&
 		    tree->ops->readpage_io_failed_hook) {
 			ret = tree->ops->readpage_io_failed_hook(bio, page,
-							 start, end, state);
+							 start, end, NULL);
 			if (ret == 0) {
-				state = NULL;
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
 				continue;
 			}
 		}
 
-		spin_lock_irqsave(&tree->lock, flags);
-		if (!state || state->end != end) {
-			state = NULL;
-			node = __etree_search(tree, start, NULL, NULL);
-			if (node) {
-				state = rb_entry(node, struct extent_state,
-						 rb_node);
-				if (state->end != end ||
-				    !(state->state & EXTENT_LOCKED))
-					state = NULL;
-			}
-			if (!state) {
-				spin_unlock_irqrestore(&tree->lock, flags);
-				if (uptodate)
-					set_extent_uptodate(tree, start, end,
-							    GFP_ATOMIC);
-				unlock_extent(tree, start, end, GFP_ATOMIC);
-				goto next_io;
-			}
-		}
+		if (uptodate)
+			set_extent_uptodate(tree, start, end,
+					    GFP_ATOMIC);
+		unlock_extent(tree, start, end, GFP_ATOMIC);
 
-		cur = end;
-		while(1) {
-			struct extent_state *clear = state;
-			cur = state->start;
-			node = rb_prev(&state->rb_node);
-			if (node) {
-				state = rb_entry(node,
-					 struct extent_state,
-					 rb_node);
-			} else {
-				state = NULL;
-			}
-			if (uptodate) {
-				set_state_cb(tree, clear, EXTENT_UPTODATE);
-				clear->state |= EXTENT_UPTODATE;
-			}
-			clear_state_bit(tree, clear, EXTENT_LOCKED,
-					1, 0);
-			if (cur == start)
-				break;
-			if (cur < start) {
-				WARN_ON(1);
-				break;
-			}
-			if (!node)
-				break;
-		}
-		/* before releasing the lock, make sure the next state
-		 * variable has the expected bits set and corresponds
-		 * to the correct offsets in the file
-		 */
-		if (state && (state->end + 1 != start ||
-		    !(state->state & EXTENT_LOCKED))) {
-			state = NULL;
-		}
-		spin_unlock_irqrestore(&tree->lock, flags);
-next_io:
 		if (whole_page) {
 			if (uptodate) {
 				SetPageUptodate(page);
@@ -1683,8 +1565,7 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_state *state = bio->bi_private;
-	struct extent_io_tree *tree = state->tree;
+	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
 
@@ -1695,6 +1576,8 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 
 	do {
 		struct page *page = bvec->bv_page;
+		tree = &BTRFS_I(page->mapping->host)->io_tree;
+
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
 			bvec->bv_offset;
 		end = start + bvec->bv_len - 1;
@@ -1765,7 +1648,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num)
 	BUG_ON(state->end != end);
 	spin_unlock_irq(&tree->lock);
 
-	bio->bi_private = state;
+	bio->bi_private = NULL;
 
 	bio_get(bio);