
Commit e288e931 authored by Linus Torvalds

Merge branch 'bcache' (bcache fixes from Kent Overstreet)

Merge bcache fixes from Kent Overstreet:
 "There's fixes for _three_ different data corruption bugs, all of which
  were found by users hitting them in the wild.

  The first one isn't bcache specific - in 3.11 bcache was switched to
  the bio_copy_data in fs/bio.c, and that's when the bug in that code
  was discovered, but it's also used by raid1 and pktcdvd.  (That was my
  code too, so the bug's doubly embarrassing given that it was or
  should've been just a cut and paste from bcache code.  Dunno what
  happened there).

  Most of these (all the non data corruption bugs, actually) were ready
  before the merge window and have been sitting in Jens' tree, but I
  don't know what's been up with him lately..."

* emailed patches from Kent Overstreet <kmo@daterainc.com>:
  bcache: Fix flushes in writeback mode
  bcache: Fix for handling overlapping extents when reading in a btree node
  bcache: Fix a shrinker deadlock
  bcache: Fix a dumb CPU spinning bug in writeback
  bcache: Fix a flush/fua performance bug
  bcache: Fix a writeback performance regression
  bcache: Correct printf()-style format length modifier
  bcache: Fix for when no journal entries are found
  bcache: Strip endline when writing the label through sysfs
  bcache: Fix a dumb journal discard bug
  block: Fix bio_copy_data()
parents db6aaf4d c0f04d88
+3 −4
@@ -498,7 +498,7 @@ struct cached_dev {
 	 */
 	atomic_t		has_dirty;
 
-	struct ratelimit	writeback_rate;
+	struct bch_ratelimit	writeback_rate;
 	struct delayed_work	writeback_rate_update;
 
 	/*
@@ -507,10 +507,9 @@ struct cached_dev {
 	 */
 	sector_t		last_read;
 
-	/* Number of writeback bios in flight */
-	atomic_t		in_flight;
+	/* Limit number of writeback bios in flight */
+	struct semaphore	in_flight;
 	struct closure_with_timer writeback;
-	struct closure_waitlist	writeback_wait;
 
 	struct keybuf		writeback_keys;
 
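Illustrative note (not part of the commit): the header change above swaps the atomic in_flight counter and its wait list for a counting semaphore, which is what bounds the number of writeback bios in flight. The sketch below only shows that general pattern against the 3.11-era bio API; the example_* names and the limit of 64 are assumptions, not bcache code.

/* Illustrative sketch only -- not bcache code. A counting semaphore caps
 * the number of writeback bios in flight: down() sleeps once the limit is
 * reached, and up() in the completion handler releases a slot.
 */
#include <linux/bio.h>
#include <linux/semaphore.h>

#define EXAMPLE_WRITEBACK_LIMIT	64	/* hypothetical bound */

struct example_writeback {
	struct semaphore	in_flight;
};

static void example_writeback_init(struct example_writeback *wb)
{
	sema_init(&wb->in_flight, EXAMPLE_WRITEBACK_LIMIT);
}

/* 3.11-era bi_end_io signature: void (*)(struct bio *, int) */
static void example_write_endio(struct bio *bio, int error)
{
	struct example_writeback *wb = bio->bi_private;

	up(&wb->in_flight);		/* free a slot for the next writer */
	bio_put(bio);
}

static void example_issue_writeback(struct example_writeback *wb,
				    struct bio *bio)
{
	down(&wb->in_flight);		/* may sleep until a slot is free */
	bio->bi_end_io	= example_write_endio;
	bio->bi_private	= wb;
	submit_bio(WRITE, bio);
}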
+28 −11
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
 
 /* Mergesort */
 
+static void sort_key_next(struct btree_iter *iter,
+			  struct btree_iter_set *i)
+{
+	i->k = bkey_next(i->k);
+
+	if (i->k == i->end)
+		*i = iter->data[--iter->used];
+}
+
 static void btree_sort_fixup(struct btree_iter *iter)
 {
 	while (iter->used > 1) {
 		struct btree_iter_set *top = iter->data, *i = top + 1;
-		struct bkey *k;
 
 		if (iter->used > 2 &&
 		    btree_iter_cmp(i[0], i[1]))
 			i++;
 
-		for (k = i->k;
-		     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
-		     k = bkey_next(k))
-			if (top->k > i->k)
-				__bch_cut_front(top->k, k);
-			else if (KEY_SIZE(k))
-				bch_cut_back(&START_KEY(k), top->k);
-
-		if (top->k < i->k || k == i->k)
+		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
 			break;
 
-		heap_sift(iter, i - top, btree_iter_cmp);
+		if (!KEY_SIZE(i->k)) {
+			sort_key_next(iter, i);
+			heap_sift(iter, i - top, btree_iter_cmp);
+			continue;
+		}
+
+		if (top->k > i->k) {
+			if (bkey_cmp(top->k, i->k) >= 0)
+				sort_key_next(iter, i);
+			else
+				bch_cut_front(top->k, i->k);
+
+			heap_sift(iter, i - top, btree_iter_cmp);
+		} else {
+			/* can't happen because of comparison func */
+			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+			bch_cut_back(&START_KEY(i->k), top->k);
+		}
 	}
 }
 
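Illustrative note (not part of the commit): the rewritten btree_sort_fixup() above resolves overlap between the heap's top key and the next key by clipping one of them with bch_cut_front()/bch_cut_back(); bcache extent keys are addressed by their end offset plus a size. The toy below is only meant to show what "cut front" and "cut back" mean for such end-plus-size extents; the toy_* names are invented and none of this is bcache code.

/* Toy model of end-plus-size extents, for illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_key {
	uint64_t end;	/* offset one past the last sector covered */
	uint64_t size;	/* length in sectors; start == end - size */
};

static uint64_t toy_start(const struct toy_key *k)
{
	return k->end - k->size;
}

/* Drop the part of @k that lies before @where (like bch_cut_front). */
static void toy_cut_front(uint64_t where, struct toy_key *k)
{
	if (where > toy_start(k))
		k->size = k->end > where ? k->end - where : 0;
}

/* Drop the part of @k that lies at or after @where (like bch_cut_back). */
static void toy_cut_back(uint64_t where, struct toy_key *k)
{
	if (where < k->end) {
		uint64_t start = toy_start(k);

		k->end = where;
		k->size = where > start ? where - start : 0;
	}
}

int main(void)
{
	/* Two overlapping extents: older covers [0, 8), newer covers [4, 12). */
	struct toy_key older = { .end = 8,  .size = 8 };
	struct toy_key newer = { .end = 12, .size = 8 };

	/* Keep the newer data for the overlap: clip the older key back to
	 * the newer key's start, so the extents no longer overlap. */
	toy_cut_back(toy_start(&newer), &older);
	assert(toy_start(&older) == 0 && older.end == 4);

	/* The other direction: shrink the newer key from the front so it
	 * starts where the older key originally ended (offset 8). */
	toy_cut_front(8, &newer);
	assert(toy_start(&newer) == 8 && newer.size == 4);

	printf("older [%llu, %llu), newer [%llu, %llu)\n",
	       (unsigned long long)toy_start(&older), (unsigned long long)older.end,
	       (unsigned long long)toy_start(&newer), (unsigned long long)newer.end);
	return 0;
}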
+2 −2
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
 
 	return;
 err:
-	bch_cache_set_error(b->c, "io error reading bucket %lu",
+	bch_cache_set_error(b->c, "io error reading bucket %zu",
 			    PTR_BUCKET_NR(b->c, &b->key, 0));
 }
 
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
 		return SHRINK_STOP;
 
 	/* Return -1 if we can't do anything right now */
-	if (sc->gfp_mask & __GFP_WAIT)
+	if (sc->gfp_mask & __GFP_IO)
 		mutex_lock(&c->bucket_lock);
 	else if (!mutex_trylock(&c->bucket_lock))
 		return -1;
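Illustrative note (not part of the commit): the bch_mca_scan() hunk above changes the "is it safe to sleep on bucket_lock" test from __GFP_WAIT to __GFP_IO, the idea being that a GFP_NOIO reclaim context can still have __GFP_WAIT set, and blocking there can deadlock against an allocation made while the lock is held. The sketch below shows that generic shrinker pattern; the my_* names are hypothetical and this is not the bcache implementation.

/* Illustrative sketch only: block on the lock only when the reclaim
 * context tolerates I/O, otherwise trylock and give up politely.
 */
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(my_cache_lock);

static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&my_cache_lock);	/* safe to sleep here */
	else if (!mutex_trylock(&my_cache_lock))
		return SHRINK_STOP;		/* don't risk deadlocking reclaim */

	/* ... free up to sc->nr_to_scan cached objects, counting them in freed ... */

	mutex_unlock(&my_cache_lock);
	return freed;
}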
+20 −13
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
 		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
 		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
 
-		/* Read journal buckets ordered by golden ratio hash to quickly
+		/*
+		 * Read journal buckets ordered by golden ratio hash to quickly
 		 * find a sequence of buckets with valid journal entries
 		 */
 		for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
 				goto bsearch;
 		}
 
-		/* If that fails, check all the buckets we haven't checked
+		/*
+		 * If that fails, check all the buckets we haven't checked
 		 * already
 		 */
 		pr_debug("falling back to linear search");
 
-		for (l = 0; l < ca->sb.njournal_buckets; l++) {
-			if (test_bit(l, bitmap))
-				continue;
-
+		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+		     l < ca->sb.njournal_buckets;
+		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
 			if (read_bucket(l))
 				goto bsearch;
-		}
+
+		if (list_empty(list))
+			continue;
bsearch:
 		/* Binary search */
 		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
 				r = m;
 		}
 
-		/* Read buckets in reverse order until we stop finding more
+		/*
+		 * Read buckets in reverse order until we stop finding more
 		 * journal entries
 		 */
-		pr_debug("finishing up");
+		pr_debug("finishing up: m %u njournal_buckets %u",
+			 m, ca->sb.njournal_buckets);
 		l = m;
 
 		while (1) {
@@ -228,6 +233,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
 			}
 	}
 
-	c->journal.seq = list_entry(list->prev,
-				    struct journal_replay,
-				    list)->j.seq;
+	if (!list_empty(list))
+		c->journal.seq = list_entry(list->prev,
+					    struct journal_replay,
+					    list)->j.seq;
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
 		return;
 	}
 
-	switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
+	switch (atomic_read(&ja->discard_in_flight)) {
 	case DISCARD_IN_FLIGHT:
 		return;
 
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
 		if (cl)
 			BUG_ON(!closure_wait(&w->wait, cl));
 
+		closure_flush(&c->journal.io);
 		__journal_try_write(c, true);
 	}
 }
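Illustrative note (not part of the commit): in the linear-search hunk above, the open-coded loop that skipped already-read buckets with test_bit() is replaced by find_first_zero_bit()/find_next_zero_bit(), which visit only the clear bits. The kernel's for_each_clear_bit() helper expresses the same iteration; the sketch below (example_* names invented) is illustrative only.

/* Illustrative sketch only: walk the journal buckets whose bits are still
 * clear in the bitmap, i.e. the ones that have not been read yet.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_NBUCKETS	256

static void example_scan_unread(unsigned long *bitmap,
				bool (*read_bucket)(unsigned long))
{
	unsigned long l;

	/* Same shape as the loop in the fix above. */
	for (l = find_first_zero_bit(bitmap, EXAMPLE_NBUCKETS);
	     l < EXAMPLE_NBUCKETS;
	     l = find_next_zero_bit(bitmap, EXAMPLE_NBUCKETS, l + 1))
		if (read_bucket(l))
			return;
}

static void example_scan_unread_alt(unsigned long *bitmap,
				    bool (*read_bucket)(unsigned long))
{
	unsigned long l;

	/* Equivalent iteration using the generic helper. */
	for_each_clear_bit(l, bitmap, EXAMPLE_NBUCKETS)
		if (read_bucket(l))
			return;
}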
+9 −6
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
 	} else {
 		bch_writeback_add(dc);
 
-		if (s->op.flush_journal) {
+		if (bio->bi_rw & REQ_FLUSH) {
 			/* Also need to send a flush to the backing device */
-			s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-							   dc->disk.bio_split);
+			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
+							     dc->disk.bio_split);
 
-			bio->bi_size = 0;
-			bio->bi_vcnt = 0;
-			closure_bio_submit(bio, cl, s->d);
+			flush->bi_rw	= WRITE_FLUSH;
+			flush->bi_bdev	= bio->bi_bdev;
+			flush->bi_end_io = request_endio;
+			flush->bi_private = cl;
+
+			closure_bio_submit(flush, cl, s->d);
 		} else {
 			s->op.cache_bio = bio;
 		}
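Illustrative note (not part of the commit): the request_write() hunk above stops truncating and re-submitting the original bio and instead allocates a fresh, payload-less bio marked WRITE_FLUSH for the backing device, leaving the original bio intact for the cache. Below is a sketch of that pattern against the 3.11-era block API; the example_* names are invented, and bcache's own request_endio()/closure_bio_submit() plumbing is not reproduced here.

/* Illustrative sketch only: send an empty flush to the same device the
 * original write targeted, without touching the original bio.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* 3.11-era bi_end_io signature: void (*)(struct bio *, int) */
static void example_flush_endio(struct bio *bio, int error)
{
	if (error)
		pr_err("cache flush failed: %d\n", error);
	bio_put(bio);
}

static void example_send_flush(struct bio *orig)
{
	struct bio *flush = bio_alloc(GFP_NOIO, 0);	/* zero data pages */

	flush->bi_bdev	  = orig->bi_bdev;
	flush->bi_end_io  = example_flush_endio;
	flush->bi_private = NULL;

	submit_bio(WRITE_FLUSH, flush);
}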