
Commit 8b712842 authored by Chris Mason

Btrfs: Add async worker threads for pre and post IO checksumming



Btrfs has been using workqueues to spread the checksumming load across
other CPUs in the system.  But workqueues only schedule work on the same
CPU that queued it, which limits their benefit on systems with many CPUs.

This code adds a generic facility to schedule work with pools of kthreads,
and changes the bio submission code to queue bios up.  The queueing is
important to make sure large numbers of procs on the system don't
turn streaming workloads into random workloads by sending IO down
concurrently.

The end result of all of this is much higher performance (and CPU usage) when
doing checksumming on large machines.  Two worker pools are created,
one for writes and one for endio processing.  The two could deadlock if
we tried to service both from a single pool.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 43e570b0
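
For context (not part of the commit): a minimal sketch of how the two pools
described above are created and torn down, mirroring the open_ctree() and
close_ctree() changes to disk-io.c further down in this diff; error handling
is omitted.

#include <linux/cpumask.h>		/* num_online_cpus() */
#include "async-thread.h"

static struct btrfs_workers workers;		/* checksumming before writes */
static struct btrfs_workers endio_workers;	/* checksum verify after reads */

static void start_pools_sketch(void)
{
	/* writers start IO, so allow more threads than CPUs (added on demand) */
	btrfs_init_workers(&workers, num_online_cpus() * 2);
	btrfs_init_workers(&endio_workers, num_online_cpus());

	/*
	 * endio work is queued from interrupt time and these workers don't
	 * normally start IO, so one thread per CPU is started up front
	 */
	btrfs_start_workers(&workers, 1);
	btrfs_start_workers(&endio_workers, num_online_cpus());
}

static void stop_pools_sketch(void)
{
	btrfs_stop_workers(&workers);		/* kthread_stop() each thread */
	btrfs_stop_workers(&endio_workers);
}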
fs/btrfs/Makefile  +1 −1
@@ -6,7 +6,7 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
	   hash.o file-item.o inode-item.o inode-map.o disk-io.o \
	   transaction.o bit-radix.o inode.o file.o tree-defrag.o \
	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
	   extent_io.o volumes.o
	   extent_io.o volumes.o async-thread.o

btrfs-$(CONFIG_FS_POSIX_ACL)	+= acl.o
else
fs/btrfs/async-thread.c  +288 −0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;
};

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while(!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
		}
		worker->working = 0;
		if (freezing(current)) {
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	while(!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	workers->last = NULL;
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker, "btrfs");
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->worker_list);
		workers->last = worker;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	struct list_head *start;
	int enforce_min = workers->num_workers < workers->max_workers;

	/* start with the last thread if it isn't busy */
	worker = workers->last;
	if (atomic_read(&worker->num_pending) < 64)
		goto done;

	next = worker->worker_list.next;
	start = &worker->worker_list;

	/*
	 * check all the workers for someone that is bored.  FIXME, do
	 * something smart here
	 */
	while(next != start) {
		if (next == &workers->worker_list) {
			next = workers->worker_list.next;
			continue;
		}
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		if (atomic_read(&worker->num_pending) < 64 || !enforce_min)
			goto done;
		next = next->next;
	}
	/*
	 * nobody was bored, if we're already at the max thread count,
	 * use the last thread
	 */
	if (!enforce_min || atomic_read(&workers->last->num_pending) < 64) {
		return workers->last;
	}
	return NULL;
done:
	workers->last = worker;
	return worker;
}

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			/*
			 * we have failed to find any workers, just
			 * return the first one
			 */
			worker = list_entry(workers->worker_list.next,
				  struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
fs/btrfs/async-thread.h  +78 −0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_

struct btrfs_worker_thread;

/*
 * This is similar to a workqueue, but it is meant to spread the operations
 * across all available cpus instead of just the CPU that was used to
 * queue the work.  There is also some batching introduced to try and
 * cut down on context switches.
 *
 * By default threads are added on demand up to 2 * the number of cpus.
 * Changing struct btrfs_workers->max_workers is one way to prevent
 * demand creation of kthreads.
 *
 * the basic model of these worker threads is to embed a btrfs_work
 * structure in your own data struct, and use container_of in a
 * work function to get back to your data struct.
 */
struct btrfs_work {
	/*
	 * only func should be set to the function you want called
	 * your work struct is passed as the only arg
	 */
	void (*func)(struct btrfs_work *work);

	/*
	 * flags should be set to zero.  It is used to make sure the
	 * struct is only inserted once into the list.
	 */
	unsigned long flags;

	/* don't touch these */
	struct btrfs_worker_thread *worker;
	struct list_head list;
};

struct btrfs_workers {
	/* current number of running workers */
	int num_workers;

	/* max number of workers allowed.  set by btrfs_init_workers */
	int max_workers;

	/* list with all the work threads */
	struct list_head worker_list;

	/* the last worker thread to have something queued */
	struct btrfs_worker_thread *last;

	/* lock for finding the next worker thread to queue on */
	spinlock_t lock;
};

int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, int max);
int btrfs_requeue_work(struct btrfs_work *work);
#endif
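
As the comment above describes, a caller embeds struct btrfs_work in its own
struct and uses container_of() in the work function to get back to its data.
A minimal sketch of that pattern follows; the csum_job struct and names are
hypothetical and not part of this commit, but the shape matches the
async_submit_bio and end_io_wq users in disk-io.c.

#include <linux/kernel.h>	/* container_of() */
#include <linux/slab.h>		/* kzalloc()/kfree() */
#include <linux/bio.h>
#include "async-thread.h"

struct csum_job {
	struct bio *bio;
	int mirror_num;
	struct btrfs_work work;		/* embedded work item */
};

static void csum_job_fn(struct btrfs_work *work)
{
	/* recover the containing struct from the embedded btrfs_work */
	struct csum_job *job = container_of(work, struct csum_job, work);

	/* ... checksum job->bio here, then submit or complete it ... */
	kfree(job);
}

static int queue_csum_job(struct btrfs_workers *workers,
			  struct bio *bio, int mirror_num)
{
	struct csum_job *job = kzalloc(sizeof(*job), GFP_NOFS);

	if (!job)
		return -ENOMEM;
	job->bio = bio;
	job->mirror_num = mirror_num;
	job->work.func = csum_job_fn;
	job->work.flags = 0;		/* must start cleared */
	return btrfs_queue_worker(workers, &job->work);
}

Queueing stays cheap: btrfs_queue_worker() picks a thread, appends to its
pending list, and only calls wake_up_process() if that thread isn't already
marked as working.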
fs/btrfs/ctree.h  +11 −3
@@ -30,6 +30,7 @@
#include "bit-radix.h"
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"

struct btrfs_trans_handle;
struct btrfs_transaction;
@@ -518,13 +519,20 @@ struct btrfs_fs_info {
	struct list_head hashers;
	struct list_head dead_roots;
	struct list_head end_io_work_list;
	struct list_head async_submit_work_list;
	struct work_struct end_io_work;
	struct work_struct async_submit_work;
	spinlock_t end_io_work_lock;
	spinlock_t async_submit_work_lock;
	atomic_t nr_async_submits;

	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads.  This is because readers
	 * can run with FS locks held, and the writers may be waiting for
	 * those locks.  We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 */
	struct btrfs_workers workers;
	struct btrfs_workers endio_workers;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	struct work_struct trans_work;
#else
fs/btrfs/disk-io.c  +82 −118
@@ -31,6 +31,7 @@
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"

#if 0
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
@@ -46,8 +47,7 @@ static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
#endif

static struct extent_io_ops btree_extent_io_ops;
static struct workqueue_struct *end_io_workqueue;
static struct workqueue_struct *async_submit_workqueue;
static void end_workqueue_fn(struct btrfs_work *work);

struct end_io_wq {
	struct bio *bio;
@@ -57,6 +57,7 @@ struct end_io_wq {
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

struct async_submit_bio {
@@ -66,6 +67,7 @@ struct async_submit_bio {
	extent_submit_bio_hook_t *submit_bio_hook;
	int rw;
	int mirror_num;
	struct btrfs_work work;
};

struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
@@ -389,7 +391,6 @@ static int end_workqueue_bio(struct bio *bio,
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	unsigned long flags;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
@@ -397,11 +398,10 @@ static int end_workqueue_bio(struct bio *bio,
#endif

	fs_info = end_io_wq->info;
	spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
	end_io_wq->error = err;
	list_add_tail(&end_io_wq->list, &fs_info->end_io_work_list);
	spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags);
	queue_work(end_io_workqueue, &fs_info->end_io_work);
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;
	btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
@@ -428,6 +428,19 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
	return 0;
}

static void run_one_async_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;

	async = container_of(work, struct  async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;
	atomic_dec(&fs_info->nr_async_submits);
	async->submit_bio_hook(async->inode, async->rw, async->bio,
			       async->mirror_num);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			extent_submit_bio_hook_t *submit_bio_hook)
@@ -443,13 +456,10 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_hook = submit_bio_hook;

	spin_lock(&fs_info->async_submit_work_lock);
	list_add_tail(&async->list, &fs_info->async_submit_work_list);
	async->work.func = run_one_async_submit;
	async->work.flags = 0;
	atomic_inc(&fs_info->nr_async_submits);
	spin_unlock(&fs_info->async_submit_work_lock);

	queue_work(async_submit_workqueue, &fs_info->async_submit_work);
	btrfs_queue_worker(&fs_info->workers, &async->work);
	return 0;
}

@@ -462,19 +472,32 @@ static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,

	offset = bio->bi_sector << 9;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	if (rw & (1 << BIO_RW)) {
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}

	/*
	 * called for a read, do the setup so that checksum validation
	 * can happen in the async kernel threads
	 */
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
	BUG_ON(ret);

	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num)
{
	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	if (!(rw & (1 << BIO_RW))) {
		return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
	}
@@ -1036,50 +1059,30 @@ static int bio_ready_for_csum(struct bio *bio)
	return ret;
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
static void btrfs_end_io_csum(void *p)
#else
static void btrfs_end_io_csum(struct work_struct *work)
#endif
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	struct btrfs_fs_info *fs_info = p;
#else
	struct btrfs_fs_info *fs_info = container_of(work,
						     struct btrfs_fs_info,
						     end_io_work);
#endif
	unsigned long flags;
	struct end_io_wq *end_io_wq;
	struct bio *bio;
	struct list_head *next;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;
	int was_empty;

	while(1) {
		spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
		if (list_empty(&fs_info->end_io_work_list)) {
			spin_unlock_irqrestore(&fs_info->end_io_work_lock,
					       flags);
			return;
		}
		next = fs_info->end_io_work_list.next;
		list_del(next);
		spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags);

		end_io_wq = list_entry(next, struct end_io_wq, list);

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	/* metadata bios are special because the whole tree block must
	 * be checksummed at once.  This makes sure the entire block is in
	 * ram and up to date before trying to verify things.  For
	 * blocksize <= pagesize, it is basically a noop
	 */
	if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
			spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
			was_empty = list_empty(&fs_info->end_io_work_list);
			list_add_tail(&end_io_wq->list,
				      &fs_info->end_io_work_list);
			spin_unlock_irqrestore(&fs_info->end_io_work_lock,
					       flags);
			if (was_empty)
		btrfs_queue_worker(&fs_info->endio_workers,
				   &end_io_wq->work);
		return;
			continue;
	}
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
@@ -1091,41 +1094,6 @@ static void btrfs_end_io_csum(struct work_struct *work)
	bio_endio(bio, error);
#endif
}
}

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
static void btrfs_async_submit_work(void *p)
#else
static void btrfs_async_submit_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	struct btrfs_fs_info *fs_info = p;
#else
	struct btrfs_fs_info *fs_info = container_of(work,
						     struct btrfs_fs_info,
						     async_submit_work);
#endif
	struct async_submit_bio *async;
	struct list_head *next;

	while(1) {
		spin_lock(&fs_info->async_submit_work_lock);
		if (list_empty(&fs_info->async_submit_work_list)) {
			spin_unlock(&fs_info->async_submit_work_lock);
			return;
		}
		next = fs_info->async_submit_work_list.next;
		list_del(next);
		atomic_dec(&fs_info->nr_async_submits);
		spin_unlock(&fs_info->async_submit_work_lock);

		async = list_entry(next, struct async_submit_bio, list);
		async->submit_bio_hook(async->inode, async->rw, async->bio,
				       async->mirror_num);
		kfree(async);
	}
}

struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
@@ -1155,19 +1123,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
		err = -ENOMEM;
		goto fail;
	}
	end_io_workqueue = create_workqueue("btrfs-end-io");
	BUG_ON(!end_io_workqueue);
	async_submit_workqueue = create_workqueue("btrfs-async-submit");

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	INIT_LIST_HEAD(&fs_info->end_io_work_list);
	INIT_LIST_HEAD(&fs_info->async_submit_work_list);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->end_io_work_lock);
	spin_lock_init(&fs_info->async_submit_work_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);

@@ -1222,13 +1182,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	fs_info->do_barriers = 1;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum, fs_info);
	INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work,
		  fs_info);
	INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
#else
	INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum);
	INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work);
	INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
#endif
	BTRFS_I(fs_info->btree_inode)->root = tree_root;
@@ -1240,6 +1195,19 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->fs_mutex);

	/* we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time.  The endio
	 * workers don't normally start IO, so some number of them <= the
	 * number of cpus is fine.  They handle checksumming after a read.
	 *
	 * The other worker threads do start IO, so the max is larger than
	 * the number of CPUs.  FIXME, tune this for huge machines
	 */
	btrfs_init_workers(&fs_info->workers, num_online_cpus() * 2);
	btrfs_init_workers(&fs_info->endio_workers, num_online_cpus());
	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, num_online_cpus());

#if 0
	ret = add_hasher(fs_info, "crc32c");
	if (ret) {
@@ -1375,6 +1343,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
fail_iput:
	iput(fs_info->btree_inode);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
fail:
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -1623,16 +1593,10 @@ int close_ctree(struct btrfs_root *root)
	extent_io_tree_empty_lru(&fs_info->extent_ins);
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);

	flush_workqueue(async_submit_workqueue);
	flush_workqueue(end_io_workqueue);

	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

	flush_workqueue(async_submit_workqueue);
	destroy_workqueue(async_submit_workqueue);

	flush_workqueue(end_io_workqueue);
	destroy_workqueue(end_io_workqueue);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);

	iput(fs_info->btree_inode);
#if 0