Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0878ae2d authored by Jens Axboe
Browse files

Merge branch 'bcache-for-3.11' of git://evilpiepirate.org/~kent/linux-bcache into for-3.11/drivers

Kent writes:

Hey Jens - I've been busy torture testing and chasing bugs, here's the
fruits of my labors. These are all fairly small fixes, some of them
quite important.
parents d0e3d023 79826c35
Loading
Loading
Loading
Loading
+8 −10
Original line number Diff line number Diff line
@@ -63,6 +63,7 @@
#include "bcache.h"
#include "btree.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
@@ -363,11 +364,10 @@ do { \
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
			closure_put(&ca->set->cl);			\
		if (kthread_should_stop())				\
			return 0;					\
		}							\
									\
		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
@@ -547,14 +547,12 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,

int bch_cache_allocator_start(struct cache *ca)
{
	ca->alloc_thread = kthread_create(bch_allocator_thread,
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(ca->alloc_thread))
		return PTR_ERR(ca->alloc_thread);

	closure_get(&ca->set->cl);
	wake_up_process(ca->alloc_thread);
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}

+1 −4
Original line number Diff line number Diff line
@@ -434,6 +434,7 @@ struct bcache_device {

	/* If nonzero, we're detaching/unregistering from cache set */
	atomic_t		detaching;
	int			flush_done;

	uint64_t		nr_stripes;
	unsigned		stripe_size_bits;
@@ -663,13 +664,9 @@ struct gc_stat {
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
 * the allocation thread.
 */
#define CACHE_SET_UNREGISTERING		0
#define	CACHE_SET_STOPPING		1
#define	CACHE_SET_STOPPING_2		2

struct cache_set {
	struct closure		cl;
+3 −1
Original line number Diff line number Diff line
@@ -1410,8 +1410,10 @@ static void btree_gc_start(struct cache_set *c)
	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->gc_gen = b->gen;
			if (!atomic_read(&b->pin))
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
+4 −2
Original line number Diff line number Diff line
@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
		} else {
			struct closure *parent = cl->parent;
			struct closure_waitlist *wait = closure_waitlist(cl);
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			smp_mb();
			atomic_set(&cl->remaining, -1);

			if (wait)
				closure_wake_up(wait);

			if (cl->fn)
				cl->fn(cl);
			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
+6 −1
Original line number Diff line number Diff line
@@ -184,9 +184,14 @@ bsearch:
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (read_bucket(m))
			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
Loading