
Commit 99d54001 authored by Jens Axboe

Merge branch 'for-jens' of http://evilpiepirate.org/git/linux-bcache into for-3.17/drivers

Kent writes:

Hey Jens, here's the pull request for 3.17 - typically late, but lots of
tasty fixes in this one.
parents bf0d6e4a 0781c874
drivers/md/bcache/alloc.c +1 −1
@@ -331,7 +331,7 @@ static int bch_allocator_thread(void *arg)
 				mutex_unlock(&ca->set->bucket_lock);
 				blkdev_issue_discard(ca->bdev,
 					bucket_to_sector(ca->set, bucket),
-					ca->sb.block_size, GFP_KERNEL, 0);
+					ca->sb.bucket_size, GFP_KERNEL, 0);
 				mutex_lock(&ca->set->bucket_lock);
 			}
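
Note: the allocator thread was passing ca->sb.block_size as the discard length, so only the first block of a freed bucket was discarded; both fields are in 512-byte sectors and a bucket spans many blocks. A minimal sketch of the corrected call, assuming the 3.16-era signature blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags):

	/* Sketch only: discard the whole freed bucket.
	 * bucket_to_sector() yields the bucket's first sector;
	 * sb.bucket_size is the bucket length in sectors. */
	blkdev_issue_discard(ca->bdev,
			     bucket_to_sector(ca->set, bucket),
			     ca->sb.bucket_size,	/* was sb.block_size */
			     GFP_KERNEL, 0);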

drivers/md/bcache/bcache.h +4 −0
@@ -477,9 +477,13 @@ struct gc_stat {
  * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
  * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
  * flushing dirty data).
+ *
+ * CACHE_SET_RUNNING means all cache devices have been registered and journal
+ * replay is complete.
  */
 #define CACHE_SET_UNREGISTERING		0
 #define	CACHE_SET_STOPPING		1
+#define	CACHE_SET_RUNNING		2
 
 struct cache_set {
 	struct closure		cl;
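
Note: CACHE_SET_RUNNING is a new bit index into the cache set's flags word, set once every cache device has registered and journal replay is complete, alongside the existing UNREGISTERING and STOPPING bits. A sketch of how such a bit is consumed, assuming struct cache_set's unsigned long flags member (the caller here is hypothetical, not part of this commit):

	/* Hypothetical caller: refuse work until the cache set is
	 * fully up, instead of racing with device registration. */
	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EBUSY;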
drivers/md/bcache/bset.c +1 −1
@@ -1182,7 +1182,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 {
 	uint64_t start_time;
 	bool used_mempool = false;
-	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
+	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
 						     order);
 	if (!out) {
 		struct page *outp;
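
Note: the temporary sort buffer is now allocated with GFP_NOWAIT, making the allocation purely opportunistic: it can never block in reclaim, and __GFP_NOWARN keeps failures quiet because the if (!out) branch falls back to a preallocated mempool (tracked by used_mempool). A sketch of the try-fast-then-fall-back shape; the state->pool name is an assumption about the surrounding code, not shown in the hunk:

	out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
	if (!out) {
		/* Non-blocking allocation failed: take a page from the
		 * mempool, which may sleep but will eventually succeed. */
		struct page *outp = mempool_alloc(state->pool, GFP_NOIO);

		out = page_address(outp);
		used_mempool = true;
	}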
drivers/md/bcache/bset.h +1 −1
@@ -453,7 +453,7 @@ static inline bool bch_bkey_equal_header(const struct bkey *l,
 {
 	return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
 		KEY_PTRS(l) == KEY_PTRS(r) &&
-		KEY_CSUM(l) == KEY_CSUM(l));
+		KEY_CSUM(l) == KEY_CSUM(r));
 }
 
 /* Keylists */
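
Note: a one-character fix for a tautological comparison: KEY_CSUM(l) == KEY_CSUM(l) is always true, so two keys whose headers differed only in checksum type still compared as equal. Compilers can catch this class of typo (clang warns under -Wtautological-compare). A reduced, self-contained demonstration with hypothetical types, not bcache's:

	struct k { unsigned dirty, ptrs, csum; };

	static _Bool equal_header_buggy(const struct k *l, const struct k *r)
	{
		return l->dirty == r->dirty &&
		       l->ptrs == r->ptrs &&
		       l->csum == l->csum;	/* typo: compares l with itself, always true */
	}

	static _Bool equal_header_fixed(const struct k *l, const struct k *r)
	{
		return l->dirty == r->dirty &&
		       l->ptrs == r->ptrs &&
		       l->csum == r->csum;
	}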
drivers/md/bcache/btree.c +31 −19
@@ -117,9 +117,9 @@
 ({									\
 	int _r, l = (b)->level - 1;					\
 	bool _w = l <= (op)->lock;					\
-	struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\
+	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
+						  _w, b);		\
 	if (!IS_ERR(_child)) {						\
-		_child->parent = (b);					\
 		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
 		rw_unlock(_w, _child);					\
 	} else								\
@@ -142,7 +142,6 @@
 		rw_lock(_w, _b, _b->level);				\
 		if (_b == (c)->root &&					\
 		    _w == insert_lock(op, _b)) {			\
-			_b->parent = NULL;				\
 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
 		}							\
 		rw_unlock(_w, _b);					\
@@ -202,7 +201,7 @@ void bch_btree_node_read_done(struct btree *b)
 	struct bset *i = btree_bset_first(b);
 	struct btree_iter *iter;
 
-	iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
+	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
 	iter->used = 0;
 
@@ -421,7 +420,7 @@ static void do_btree_node_write(struct btree *b)
 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
 		       bset_sector_offset(&b->keys, i));
 
-	if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
+	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
 		int j;
 		struct bio_vec *bv;
 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -967,7 +966,8 @@ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
  * level and op->lock.
  */
 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
-				 struct bkey *k, int level, bool write)
+				 struct bkey *k, int level, bool write,
+				 struct btree *parent)
 {
 	int i = 0;
 	struct btree *b;
@@ -1002,6 +1002,7 @@ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
 		BUG_ON(b->level != level);
 	}
 
+	b->parent = parent;
 	b->accessed = 1;
 
 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
@@ -1022,15 +1023,16 @@ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
 	return b;
 }
 
-static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
+static void btree_node_prefetch(struct btree *parent, struct bkey *k)
 {
 	struct btree *b;
 
-	mutex_lock(&c->bucket_lock);
-	b = mca_alloc(c, NULL, k, level);
-	mutex_unlock(&c->bucket_lock);
+	mutex_lock(&parent->c->bucket_lock);
+	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
+	mutex_unlock(&parent->c->bucket_lock);
 
 	if (!IS_ERR_OR_NULL(b)) {
+		b->parent = parent;
 		bch_btree_node_read(b);
 		rw_unlock(true, b);
 	}
@@ -1060,15 +1062,16 @@ static void btree_node_free(struct btree *b)
 	mutex_unlock(&b->c->bucket_lock);
 }
 
-struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
-				   int level)
+struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
+				     int level, bool wait,
+				     struct btree *parent)
 {
 	BKEY_PADDED(key) k;
 	struct btree *b = ERR_PTR(-EAGAIN);
 
 	mutex_lock(&c->bucket_lock);
 retry:
-	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, op != NULL))
+	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
 		goto err;
 
 	bkey_put(c, &k.key);
@@ -1085,6 +1088,7 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
 	}
 
 	b->accessed = 1;
+	b->parent = parent;
 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
 
 	mutex_unlock(&c->bucket_lock);
@@ -1096,14 +1100,21 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
 err:
 	mutex_unlock(&c->bucket_lock);
 
-	trace_bcache_btree_node_alloc_fail(b);
+	trace_bcache_btree_node_alloc_fail(c);
 	return b;
 }
 
+static struct btree *bch_btree_node_alloc(struct cache_set *c,
+					  struct btree_op *op, int level,
+					  struct btree *parent)
+{
+	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
+}
+
 static struct btree *btree_node_alloc_replacement(struct btree *b,
 						  struct btree_op *op)
 {
-	struct btree *n = bch_btree_node_alloc(b->c, op, b->level);
+	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
 	if (!IS_ERR_OR_NULL(n)) {
 		mutex_lock(&n->write_lock);
 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
@@ -1403,6 +1414,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
 	btree_node_free(new_nodes[0]);
 	rw_unlock(true, new_nodes[0]);
+	new_nodes[0] = NULL;
 
 	for (i = 0; i < nodes; i++) {
 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
@@ -1516,7 +1528,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
 		if (k) {
 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
-						  true);
+						  true, b);
 			if (IS_ERR(r->b)) {
 				ret = PTR_ERR(r->b);
 				break;
@@ -1811,7 +1823,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
 			k = bch_btree_iter_next_filter(&iter, &b->keys,
 						       bch_ptr_bad);
 			if (k)
-				btree_node_prefetch(b->c, k, b->level - 1);
+				btree_node_prefetch(b, k);
 
 			if (p)
 				ret = btree(check_recurse, p, b, op);
@@ -1976,12 +1988,12 @@ static int btree_split(struct btree *b, struct btree_op *op,

 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
 
-		n2 = bch_btree_node_alloc(b->c, op, b->level);
+		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
 		if (IS_ERR(n2))
 			goto err_free1;
 
 		if (!b->parent) {
-			n3 = bch_btree_node_alloc(b->c, op, b->level + 1);
+			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
 			if (IS_ERR(n3))
 				goto err_free2;
 		}
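
Notes on the btree.c hunks above: bch_btree_node_get() and the node allocators now take the parent node explicitly and set b->parent themselves, where previously each caller (the btree() macro, btree_node_prefetch(), the GC recursion, btree_split()) had to patch the back-pointer up afterwards or risked leaving it stale; btree_node_prefetch() now takes the parent directly and derives both the cache set and the child level from it. bch_btree_node_alloc() becomes a thin wrapper around __bch_btree_node_alloc(), which takes an explicit wait flag instead of inferring it from op != NULL. Two allocation-context fixes parallel the bset.c change: bio_alloc_pages() in the write path becomes opportunistic (__GFP_NOWARN|GFP_NOWAIT), since that path tolerates failure, while the read path's fill_iter allocation is used unconditionally and so moves the other way, from GFP_NOWAIT to GFP_NOIO (a mempool allocation that may sleep will not fail). Finally, btree_gc_coalesce() clears new_nodes[0] after freeing it so the error path cannot free it twice, and the allocation-failure tracepoint is passed the cache_set rather than a pointer that is an ERR_PTR at that point.

A reduced sketch of the parent-threading pattern, with hypothetical types standing in for struct btree (a sketch only, not the commit's code):

	#include <stdlib.h>

	struct node {
		struct node *parent;	/* back-pointer, cf. btree->parent */
		int level;
	};

	/* The getter wires the back-pointer itself, so the invariant
	 * "n->parent is valid whenever n is handed out" holds on every
	 * path; callers can no longer forget it. */
	static struct node *node_get(struct node *parent, int level)
	{
		struct node *n = calloc(1, sizeof(*n));	/* stand-in for mca_alloc() */
		if (n) {
			n->level = level;
			n->parent = parent;
		}
		return n;
	}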