
Commit 60e53a67 authored by Jens Axboe

Merge branch 'bcache-for-3.13' of git://evilpiepirate.org/~kent/linux-bcache into for-linus

Kent writes:

Jens - small pile of bcache fixes. I've been slacking on the writeback
fixes but those definitely need to get into 3.13.
Parents: 85157366 16749c23
drivers/md/bcache/alloc.c +2 −0
@@ -421,9 +421,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)

	if (watermark <= WATERMARK_METADATA) {
		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

drivers/md/bcache/bcache.h +6 −6
@@ -197,7 +197,7 @@ struct bucket {
	uint8_t		disk_gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint8_t		gc_gen;
-	uint16_t	gc_mark;
+	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
};

/*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	0
#define GC_MARK_DIRTY		1
#define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);

#include "journal.h"
#include "stats.h"
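For context: gc_mark is a single 16-bit word that BITMASK() carves into sub-fields, reading its last two arguments as (bit offset, width). Shrinking GC_SECTORS_USED from 14 to 13 bits is what frees bit 15 for the new GC_MOVE flag; at the old width it spanned bits 2-15 and would have overlapped it. Below is a minimal userspace sketch of the same shift-and-mask packing, using illustrative helpers rather than the kernel macro.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative shift-and-mask helpers, not the kernel's BITMASK() macro. */
static uint16_t field_get(uint16_t word, unsigned offset, unsigned width)
{
	return (word >> offset) & ((1u << width) - 1);
}

static uint16_t field_set(uint16_t word, unsigned offset, unsigned width,
			  uint16_t val)
{
	uint16_t mask = ((1u << width) - 1) << offset;

	return (word & ~mask) | ((val << offset) & mask);
}

int main(void)
{
	uint16_t gc_mark = 0;

	/* Layout after this patch: GC_MARK in bits 0-1, GC_SECTORS_USED in
	 * bits 2-14, GC_MOVE in bit 15. */
	gc_mark = field_set(gc_mark, 0, 2, 2);		/* GC_MARK_METADATA */
	gc_mark = field_set(gc_mark, 2, 13, 8191);	/* max sector count */
	gc_mark = field_set(gc_mark, 15, 1, 1);		/* GC_MOVE set */

	assert(field_get(gc_mark, 0, 2) == 2);
	assert(field_get(gc_mark, 2, 13) == 8191);
	assert(field_get(gc_mark, 15, 1) == 1);
	printf("gc_mark = 0x%04x\n", (unsigned)gc_mark);
	return 0;
}

One side effect of the narrower width: the largest value GC_SECTORS_USED can record drops from 16383 to 8191.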
@@ -372,14 +373,14 @@ struct cached_dev {
	unsigned char		writeback_percent;
	unsigned		writeback_delay;

-	int			writeback_rate_change;
-	int64_t			writeback_rate_derivative;
	uint64_t		writeback_rate_target;
+	int64_t			writeback_rate_proportional;
+	int64_t			writeback_rate_derivative;
+	int64_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_d_term;
	unsigned		writeback_rate_p_term_inverse;
-	unsigned		writeback_rate_d_smooth;
};

enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
	 * call prio_write() to keep gens from wrapping.
	 */
	uint8_t			need_save_prio;
-	unsigned		gc_move_threshold;

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
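The reshuffled writeback fields above are the state of the per-device writeback rate controller, which decides how aggressively dirty data is flushed to the backing device; the controller update itself lives in writeback.c and is not part of this diff. The sketch below is a rough, self-contained illustration of a proportional-derivative style update, included only to show why the terms are kept as signed 64-bit values: when dirty data sits below target, both terms go negative and the rate must be able to back off. The wb_state structure, helper names, and constants are hypothetical stand-ins, not bcache code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified state mirroring the renamed struct cached_dev
 * fields; an illustration only, not the code in writeback.c. */
struct wb_state {
	uint64_t rate_target;		/* desired amount of dirty data */
	int64_t  rate_proportional;	/* last proportional term (signed) */
	int64_t  rate_derivative;	/* last derivative term (signed) */
	int64_t  rate_change;		/* last applied change (signed) */
	int64_t  last_dirty;		/* dirty count at previous update */
	unsigned p_term_inverse;	/* divide the P term by this */
	unsigned d_term;		/* weight applied to the D term */
};

static void wb_update(struct wb_state *s, int64_t dirty, int64_t *rate)
{
	/* P term: how far above (positive) or below (negative) target. */
	s->rate_proportional = dirty - (int64_t)s->rate_target;
	/* D term: how fast the dirty count moved since the last update. */
	s->rate_derivative = dirty - s->last_dirty;
	s->last_dirty = dirty;

	s->rate_change = s->rate_proportional / s->p_term_inverse +
			 s->rate_derivative * (int64_t)s->d_term;

	*rate += s->rate_change;
	if (*rate < 1)
		*rate = 1;	/* keep writeback ticking over */
}

int main(void)
{
	struct wb_state s = { .rate_target = 1000, .p_term_inverse = 40,
			      .d_term = 2, .last_dirty = 1000 };
	int64_t rate = 100;
	int64_t dirty[] = { 1400, 1200, 900 };

	for (int i = 0; i < 3; i++) {
		wb_update(&s, dirty[i], &rate);
		printf("dirty=%4lld rate=%4lld change=%5lld\n",
		       (long long)dirty[i], (long long)rate,
		       (long long)s.rate_change);
	}
	return 0;
}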
drivers/md/bcache/btree.c +25 −2
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

+	/* don't reclaim buckets to which writeback keys point */
+	rcu_read_lock();
+	for (i = 0; i < c->nr_uuids; i++) {
+		struct bcache_device *d = c->devices[i];
+		struct cached_dev *dc;
+		struct keybuf_key *w, *n;
+		unsigned j;
+
+		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+			continue;
+		dc = container_of(d, struct cached_dev, disk);
+
+		spin_lock(&dc->writeback_keys.lock);
+		rbtree_postorder_for_each_entry_safe(w, n,
+					&dc->writeback_keys.keys, node)
+			for (j = 0; j < KEY_PTRS(&w->key); j++)
+				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+					    GC_MARK_DIRTY);
+		spin_unlock(&dc->writeback_keys.lock);
+	}
+	rcu_read_unlock();
+
	for_each_cache(ca, c, i) {
		uint64_t *i;

@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

-			if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
				goto check_failed;

			/* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
	struct bkey	*replace_key;
};

-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);
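Both container_of() calls above (dc = container_of(d, struct cached_dev, disk) in the gc_finish hunk, and the struct btree_insert_op lookup in btree_insert_fn()) use the same pattern: recover a pointer to the enclosing structure from a pointer to one of its embedded members. A standalone illustration with toy structure names, using the usual offsetof()-based definition:

#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of(): given a pointer to a
 * member, recover a pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct disk {			/* stands in for struct bcache_device */
	int id;
};

struct cached_dev_toy {		/* stands in for struct cached_dev */
	int writeback_percent;
	struct disk disk;	/* embedded member */
};

int main(void)
{
	struct cached_dev_toy dc = {
		.writeback_percent = 10,
		.disk = { .id = 3 },
	};
	struct disk *d = &dc.disk;	/* what c->devices[i] would hold */

	/* Walk back from the embedded member to its container. */
	struct cached_dev_toy *back = container_of(d, struct cached_dev_toy, disk);

	printf("writeback_percent=%d id=%d same=%d\n",
	       back->writeback_percent, back->disk.id, back == &dc);
	return 0;
}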
drivers/md/bcache/movinggc.c +15 −6
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

-		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+		if (GC_MOVE(g))
			return true;
	}

@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)

static void read_moving_endio(struct bio *bio, int error)
{
+	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (error)
		io->op.error = error;
+	else if (!KEY_DIRTY(&b->key) &&
+		 ptr_stale(io->op.c, &b->key, 0)) {
+		io->op.error = -EINTR;
+	}

	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
		if (!w)
			break;

+		if (ptr_stale(c, &w->key, 0)) {
+			bch_keybuf_del(&c->moving_gc_keys, w);
+			continue;
+		}
+
		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)

static unsigned bucket_heap_top(struct cache *ca)
{
-	return GC_SECTORS_USED(heap_peek(&ca->heap));
+	struct bucket *b;
+	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
			sectors_to_move -= GC_SECTORS_USED(b);
		}

-		ca->gc_move_threshold = bucket_heap_top(ca);
-
-		pr_debug("threshold %u", ca->gc_move_threshold);
+		while (heap_pop(&ca->heap, b, bucket_cmp))
+			SET_GC_MOVE(b, 1);
	}

	mutex_unlock(&c->bucket_lock);
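Taken together with the bcache.h and movinggc.c hunks above: moving GC now flags the exact buckets left on its heap with GC_MOVE, and moving_pred() tests that flag instead of comparing GC_SECTORS_USED against a per-cache gc_move_threshold. The toy model below (scenario, structure, and names are all hypothetical, not bcache code) shows the basic difference: a threshold can match buckets that were never actually selected, while the flag marks only the ones that were.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bucket: only the fields the two
 * predicates look at. */
struct bucket {
	unsigned sectors_used;	/* GC_SECTORS_USED */
	bool gc_move;		/* GC_MOVE */
};

/* Old-style predicate: anything under a cached threshold looks movable. */
static bool pred_threshold(const struct bucket *b, unsigned threshold)
{
	return b->sectors_used < threshold;
}

/* New-style predicate: only buckets explicitly flagged during selection. */
static bool pred_flag(const struct bucket *b)
{
	return b->gc_move;
}

int main(void)
{
	/* Suppose selection kept bucket 0 on the heap (so it was flagged)
	 * but never chose bucket 1, even though it sits under the same
	 * threshold. */
	struct bucket buckets[] = {
		{ .sectors_used = 10, .gc_move = true  },
		{ .sectors_used = 20, .gc_move = false },
	};
	unsigned threshold = 60;

	for (int i = 0; i < 2; i++)
		printf("bucket %d: threshold says %d, flag says %d\n", i,
		       pred_threshold(&buckets[i], threshold),
		       pred_flag(&buckets[i]));
	return 0;
}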
drivers/md/bcache/super.c +1 −1
@@ -1676,7 +1676,7 @@ static void run_cache_set(struct cache_set *c)
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
-		ca->sb.bucket_size	== c->sb.block_size &&
+		ca->sb.bucket_size	== c->sb.bucket_size &&
		ca->sb.nr_in_set	== c->sb.nr_in_set;
}
