
Commit 119ba0f8 authored by Kent Overstreet

bcache: Convert allocator thread to kthread



Using a workqueue when we just want a single thread is a bit silly.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
parent a9dd53ad
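
For reference, the diff below replaces a closure/workqueue-driven allocator with a dedicated kernel thread that sleeps until it is poked with wake_up_process(). A minimal sketch of that generic kthread pattern (illustrative only; the demo_* names are hypothetical and not bcache symbols) looks like this:

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	struct demo {
		struct task_struct	*thread;
		bool			work_ready;
	};

	static int demo_thread(void *arg)
	{
		struct demo *d = arg;

		while (!kthread_should_stop()) {
			/* Set state before checking the condition to avoid lost wakeups. */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!d->work_ready) {
				schedule();	/* sleep until someone calls wake_up_process() */
				continue;
			}
			__set_current_state(TASK_RUNNING);

			d->work_ready = false;
			/* ... do one unit of work ... */
		}
		return 0;
	}

	static int demo_start(struct demo *d)
	{
		d->thread = kthread_create(demo_thread, d, "demo_thread");
		if (IS_ERR(d->thread))
			return PTR_ERR(d->thread);

		wake_up_process(d->thread);	/* start it running */
		return 0;
	}

Waking a known task with wake_up_process() needs no waitqueue, which is why the patch can also drop cache_set->alloc_wait; note the patch itself exits on the CACHE_SET_STOPPING_2 flag rather than kthread_should_stop().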
drivers/md/bcache/alloc.c  +22 −12
@@ -63,6 +63,7 @@
#include "bcache.h"
#include "btree.h"

+#include <linux/kthread.h>
#include <linux/random.h>

#define MAX_IN_FLIGHT_DISCARDS		8U
@@ -151,7 +152,7 @@ static void discard_finish(struct work_struct *w)
	mutex_unlock(&ca->set->bucket_lock);

	closure_wake_up(&ca->set->bucket_wait);
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);

	closure_put(&ca->set->cl);
}
@@ -358,30 +359,26 @@ static void invalidate_buckets(struct cache *ca)

#define allocator_wait(ca, cond)					\
do {									\
-	DEFINE_WAIT(__wait);						\
-									\
	while (1) {							\
-		prepare_to_wait(&ca->set->alloc_wait,			\
-				&__wait, TASK_INTERRUPTIBLE);		\
+		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
-			finish_wait(&ca->set->alloc_wait, &__wait);	\
-			closure_return(cl);				\
+			closure_put(&ca->set->cl);			\
+			return 0;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
-									\
-	finish_wait(&ca->set->alloc_wait, &__wait);			\
+	__set_current_state(TASK_RUNNING);				\
} while (0)

-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
{
-	struct cache *ca = container_of(cl, struct cache, alloc);
+	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

@@ -442,7 +439,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
{
	long r = -1;
again:
-	wake_up(&ca->set->alloc_wait);
+	wake_up_process(ca->alloc_thread);

	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
	    fifo_pop(&ca->free, r)) {
@@ -552,6 +549,19 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,

/* Init */

+int bch_cache_allocator_start(struct cache *ca)
+{
+	ca->alloc_thread = kthread_create(bch_allocator_thread,
+					  ca, "bcache_allocator");
+	if (IS_ERR(ca->alloc_thread))
+		return PTR_ERR(ca->alloc_thread);
+
+	closure_get(&ca->set->cl);
+	wake_up_process(ca->alloc_thread);
+
+	return 0;
+}
+
void bch_cache_allocator_exit(struct cache *ca)
{
	struct discard *d;
drivers/md/bcache/bcache.h  +11 −6
@@ -565,8 +565,7 @@ struct cache {

	unsigned		watermark[WATERMARK_MAX];

-	struct closure		alloc;
-	struct workqueue_struct	*alloc_workqueue;
+	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;
@@ -703,9 +702,6 @@ struct cache_set {
	/* For the btree cache */
	struct shrinker		shrink;

-	/* For the allocator itself */
-	wait_queue_head_t	alloc_wait;
-
	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

@@ -1173,6 +1169,15 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)

+static inline void wake_up_allocators(struct cache_set *c)
+{
+	struct cache *ca;
+	unsigned i;
+
+	for_each_cache(ca, c, i)
+		wake_up_process(ca->alloc_thread);
+}
+
/* Forward declarations */

void bch_writeback_queue(struct cached_dev *);
@@ -1193,7 +1198,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);
-void bch_allocator_thread(struct closure *);

long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1244,6 +1248,7 @@ int bch_btree_cache_alloc(struct cache_set *);
void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);

+int bch_cache_allocator_start(struct cache *ca);
void bch_cache_allocator_exit(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);

drivers/md/bcache/btree.c  +3 −3
@@ -273,7 +273,7 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
@@ -984,7 +984,7 @@ static void btree_node_free(struct btree *b, struct btree_op *op)

	if (b->prio_blocked &&
	    !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
-		wake_up(&b->c->alloc_wait);
+		wake_up_allocators(b->c);

	b->prio_blocked = 0;

@@ -1547,7 +1547,7 @@ static void bch_btree_gc(struct closure *cl)
	blktrace_msg_all(c, "Finished gc");

	trace_bcache_gc_end(c->sb.set_uuid);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);

	continue_at(cl, bch_moving_gc, bch_gc_wq);
}
drivers/md/bcache/super.c  +7 −12
@@ -1282,7 +1282,7 @@ static void cache_set_flush(struct closure *cl)

	/* Shut down allocator threads */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);

	bch_cache_accounting_destroy(&c->accounting);

@@ -1373,7 +1373,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

-	init_waitqueue_head(&c->alloc_wait);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->fill_lock);
	mutex_init(&c->sort_lock);
@@ -1496,9 +1495,10 @@ static void run_cache_set(struct cache_set *c)
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     system_wq, &c->cl);
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
@@ -1531,17 +1531,16 @@ static void run_cache_set(struct cache_set *c)

		bch_btree_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     ca->alloc_workqueue, &c->cl);
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

-		wake_up(&c->alloc_wait);
-
		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err_unlock_gc;
@@ -1673,9 +1672,6 @@ void bch_cache_release(struct kobject *kobj)

	bio_split_pool_free(&ca->bio_split_hook);

-	if (ca->alloc_workqueue)
-		destroy_workqueue(ca->alloc_workqueue);
-
	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);
@@ -1723,7 +1719,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					  2, GFP_KERNEL)) ||
	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
	    bio_split_pool_init(&ca->bio_split_hook))
		return -ENOMEM;