
Commit 6f97b220 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (24 commits)
  dm crypt: add documentation
  dm: remove md argument from specific_minor
  dm table: remove unused dm_create_error_table
  dm table: drop void suspend_targets return
  dm: unplug queues in threads
  dm raid1: use timer
  dm: move include files
  dm kcopyd: rename
  dm: expose macros
  dm kcopyd: remove redundant client counting
  dm kcopyd: private mempool
  dm kcopyd: per device
  dm log: make module use tracking internal
  dm log: move register functions
  dm log: clean interface
  dm kcopyd: clean interface
  dm io: clean interface
  dm io: rename error to error_bits
  dm snapshot: store pointer to target instance
  dm log: move dirty region log code into separate module
  ...
parents 4b7227ca e3dcc5a3
Documentation/device-mapper/dm-crypt.txt  +52 −0
dm-crypt
========

Device-Mapper's "crypt" target provides transparent encryption of block devices
using the kernel crypto API.

Parameters: <cipher> <key> <iv_offset> <device path> <offset>

<cipher>
    Encryption cipher and an optional IV generation mode,
    in the format cipher-chainmode-ivopts:ivmode.
    Examples:
       des
       aes-cbc-essiv:sha256
       twofish-ecb

    /proc/crypto lists the supported crypto modes.

<key>
    Key used for encryption. It is encoded as a hexadecimal number.
    You can only use key sizes that are valid for the selected cipher.

<iv_offset>
    The IV offset is a sector count that is added to the sector number
    before creating the IV.

<device path>
    This is the device that is going to be used as the backend and contains the
    encrypted data.  You can specify it as a path like /dev/xxx or as a device
    number <major>:<minor>.

<offset>
    Starting sector within the device where the encrypted data begins.

Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
encryption with dm-crypt, using the 'cryptsetup' utility; see
http://luks.endorphin.org/

[[
#!/bin/sh
# Create a crypt device using dmsetup
dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
]]
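Here the table line maps logical sector 0, for the full size of the device as
reported by 'blockdev --getsize', to the crypt target: the cipher is
aes-cbc-essiv:sha256, the 32 hex digits form a 128-bit key, the IV offset is 0,
the backing device is $1 and the encrypted data starts at sector 0 of it.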

[[
#!/bin/sh
# Create a crypt device using cryptsetup and LUKS header with default cipher
cryptsetup luksFormat $1
cryptsetup luksOpen $1 crypt1
]]
drivers/md/Makefile  +3 −3
@@ -3,10 +3,10 @@
 #
 
 dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o dm-io.o kcopyd.o
+		   dm-ioctl.o dm-io.o dm-kcopyd.o
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
-dm-mirror-objs	:= dm-log.o dm-raid1.o
+dm-mirror-objs	:= dm-raid1.o
 dm-rdac-objs	:= dm-mpath-rdac.o
 dm-hp-sw-objs	:= dm-mpath-hp-sw.o
 md-mod-objs     := md.o bitmap.o
@@ -39,7 +39,7 @@ obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
 obj-$(CONFIG_DM_MULTIPATH_HP)	+= dm-hp-sw.o
 obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o
+obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
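Two things to note in the Makefile: kcopyd.o becomes dm-kcopyd.o to match the
renamed source file, and dm-log.o is dropped from dm-mirror-objs and instead
built as its own object next to dm-mirror.o, reflecting the move of the dirty
region log code into a separate module.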
drivers/md/dm-exception-store.c  +5 −5
@@ -9,13 +9,13 @@

#include "dm.h"
#include "dm-snap.h"
#include "dm-io.h"
#include "kcopyd.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

#define DM_MSG_PREFIX "snapshots"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
@@ -131,7 +131,7 @@ struct pstore {

 static unsigned sectors_to_pages(unsigned sectors)
 {
-	return sectors / (PAGE_SIZE >> 9);
+	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
 }
 
 static int alloc_area(struct pstore *ps)
@@ -159,7 +159,7 @@ static void free_area(struct pstore *ps)
 }
 
 struct mdata_req {
-	struct io_region *where;
+	struct dm_io_region *where;
 	struct dm_io_request *io_req;
 	struct work_struct work;
 	int result;
@@ -177,7 +177,7 @@ static void do_metadata(struct work_struct *work)
  */
 static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
-	struct io_region where = {
+	struct dm_io_region where = {
 		.bdev = ps->snap->cow->bdev,
 		.sector = ps->snap->chunk_size * chunk,
 		.count = ps->snap->chunk_size,
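Besides switching from the private "dm-io.h"/"kcopyd.h" headers to the new
public <linux/dm-io.h> and <linux/dm-kcopyd.h>, this file picks up a rounding
fix in sectors_to_pages(): with 4KB pages, PAGE_SIZE >> 9 is 8 sectors per
page, so a 9-sector area previously truncated to 1 page, while
DIV_ROUND_UP(9, 8) correctly yields 2.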
drivers/md/dm-io.c  +22 −16
@@ -5,13 +5,14 @@
  * This file is released under the GPL.
  */
 
-#include "dm-io.h"
+#include "dm.h"
 
 #include <linux/bio.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
 
 struct dm_io_client {
 	mempool_t *pool;
@@ -20,7 +21,7 @@ struct dm_io_client {

 /* FIXME: can we shrink this ? */
 struct io {
-	unsigned long error;
+	unsigned long error_bits;
 	atomic_t count;
 	struct task_struct *sleeper;
 	struct dm_io_client *client;
@@ -107,14 +108,14 @@ static inline unsigned bio_get_region(struct bio *bio)
 static void dec_count(struct io *io, unsigned int region, int error)
 {
 	if (error)
-		set_bit(region, &io->error);
+		set_bit(region, &io->error_bits);
 
 	if (atomic_dec_and_test(&io->count)) {
 		if (io->sleeper)
 			wake_up_process(io->sleeper);
 
 		else {
-			unsigned long r = io->error;
+			unsigned long r = io->error_bits;
 			io_notify_fn fn = io->callback;
 			void *context = io->context;
 
@@ -271,7 +272,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned int region, struct io_region *where,
+static void do_region(int rw, unsigned region, struct dm_io_region *where,
 		      struct dpages *dp, struct io *io)
 {
 	struct bio *bio;
@@ -320,7 +321,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 }
 
 static void dispatch_io(int rw, unsigned int num_regions,
-			struct io_region *where, struct dpages *dp,
+			struct dm_io_region *where, struct dpages *dp,
 			struct io *io, int sync)
 {
 	int i;
@@ -347,17 +348,17 @@ static void dispatch_io(int rw, unsigned int num_regions,
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-		   struct io_region *where, int rw, struct dpages *dp,
+		   struct dm_io_region *where, int rw, struct dpages *dp,
 		   unsigned long *error_bits)
 {
 	struct io io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		return -EIO;
 	}
 
-	io.error = 0;
+	io.error_bits = 0;
 	atomic_set(&io.count, 1); /* see dispatch_io() */
 	io.sleeper = current;
 	io.client = client;
@@ -378,25 +379,25 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 		return -EINTR;
 
 	if (error_bits)
-		*error_bits = io.error;
+		*error_bits = io.error_bits;
 
-	return io.error ? -EIO : 0;
+	return io.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-		    struct io_region *where, int rw, struct dpages *dp,
+		    struct dm_io_region *where, int rw, struct dpages *dp,
 		    io_notify_fn fn, void *context)
 {
 	struct io *io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
 		WARN_ON(1);
 		fn(1, context);
 		return -EIO;
 	}
 
 	io = mempool_alloc(client->pool, GFP_NOIO);
-	io->error = 0;
+	io->error_bits = 0;
 	atomic_set(&io->count, 1); /* see dispatch_io() */
 	io->sleeper = NULL;
 	io->client = client;
@@ -435,10 +436,15 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 }
 
 /*
- * New collapsed (a)synchronous interface
+ * New collapsed (a)synchronous interface.
+ *
+ * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
+ * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct io_region *where, unsigned long *sync_error_bits)
+	  struct dm_io_region *where, unsigned long *sync_error_bits)
 {
 	int r;
 	struct dpages dp;
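A usage note for reviewers: with the collapsed interface, a single dm_io() call
covers both the synchronous and asynchronous cases, selected by notify.fn.
Below is a minimal sketch of a synchronous read built from what this diff shows
(dm_io_request, dm_io_region, the error_bits convention); DM_IO_KMEM,
mem.ptr.addr and the dm_io_client_create()/dm_io_client_destroy() pair are
assumptions from the dm-io client API of this kernel rather than from these
hunks.

[[
#include <linux/dm-io.h>

/*
 * Read 'count' sectors from 'bdev' into a kernel buffer and wait.
 * Leaving notify.fn NULL selects the synchronous path, so the
 * per-region error bits come back through the last argument.
 */
static int example_read_sectors(struct block_device *bdev, sector_t start,
				sector_t count, void *buffer)
{
	struct dm_io_client *client;
	struct dm_io_region where;
	struct dm_io_request io_req;
	unsigned long error_bits = 0;
	int r;

	client = dm_io_client_create(16);	/* assumed mempool size, in pages */
	if (IS_ERR(client))
		return PTR_ERR(client);

	where.bdev = bdev;
	where.sector = start;
	where.count = count;

	io_req.bi_rw = READ;
	io_req.mem.type = DM_IO_KMEM;		/* assumed: kmalloc'd buffer */
	io_req.mem.ptr.addr = buffer;
	io_req.mem.offset = 0;
	io_req.notify.fn = NULL;		/* NULL => dm_io() sleeps until done */
	io_req.client = client;

	r = dm_io(&io_req, 1, &where, &error_bits);

	dm_io_client_destroy(client);
	return r ? r : (error_bits ? -EIO : 0);
}
]]

An asynchronous caller would instead set notify.fn and notify.context and, per
the new comment above, either set the BIO_RW_SYNC bit in bi_rw or unplug the
queue afterwards.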
drivers/md/kcopyd.c → drivers/md/dm-kcopyd.c  +122 −176
@@ -9,9 +9,8 @@
  * completion notification.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
-
 #include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -23,24 +22,15 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/dm-kcopyd.h>
 
-#include "kcopyd.h"
-
-static struct workqueue_struct *_kcopyd_wq;
-static struct work_struct _kcopyd_work;
-
-static void wake(void)
-{
-	queue_work(_kcopyd_wq, &_kcopyd_work);
-}
+#include "dm.h"
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
  *---------------------------------------------------------------*/
-struct kcopyd_client {
-	struct list_head list;
-
+struct dm_kcopyd_client {
 	spinlock_t lock;
 	struct page_list *pages;
 	unsigned int nr_pages;
@@ -50,8 +40,32 @@ struct kcopyd_client {

 	wait_queue_head_t destroyq;
 	atomic_t nr_jobs;
+
+	mempool_t *job_pool;
+
+	struct workqueue_struct *kcopyd_wq;
+	struct work_struct kcopyd_work;
+
+/*
+ * We maintain three lists of jobs:
+ *
+ * i)   jobs waiting for pages
+ * ii)  jobs that have pages, and are waiting for the io to be issued.
+ * iii) jobs that have completed.
+ *
+ * All three of these are protected by job_lock.
+ */
+	spinlock_t job_lock;
+	struct list_head complete_jobs;
+	struct list_head io_jobs;
+	struct list_head pages_jobs;
 };
 
+static void wake(struct dm_kcopyd_client *kc)
+{
+	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
+}
+
 static struct page_list *alloc_pl(void)
 {
 	struct page_list *pl;
@@ -75,7 +89,7 @@ static void free_pl(struct page_list *pl)
 	kfree(pl);
 }
 
-static int kcopyd_get_pages(struct kcopyd_client *kc,
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
 			    unsigned int nr, struct page_list **pages)
 {
 	struct page_list *pl;
@@ -98,7 +112,7 @@ static int kcopyd_get_pages(struct kcopyd_client *kc,
 	return 0;
 }
 
-static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
 	struct page_list *cursor;
 
@@ -126,7 +140,7 @@ static void drop_pages(struct page_list *pl)
 	}
 }
 
-static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
+static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 {
 	unsigned int i;
 	struct page_list *pl = NULL, *next;
@@ -147,7 +161,7 @@ static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
 	return 0;
 }
 
-static void client_free_pages(struct kcopyd_client *kc)
+static void client_free_pages(struct dm_kcopyd_client *kc)
 {
 	BUG_ON(kc->nr_free_pages != kc->nr_pages);
 	drop_pages(kc->pages);
@@ -161,7 +175,7 @@ static void client_free_pages(struct kcopyd_client *kc)
  * ever having to do io (which could cause a deadlock).
  *---------------------------------------------------------------*/
 struct kcopyd_job {
-	struct kcopyd_client *kc;
+	struct dm_kcopyd_client *kc;
 	struct list_head list;
 	unsigned long flags;
 
@@ -175,13 +189,13 @@ struct kcopyd_job {
 	 * Either READ or WRITE
 	 */
 	int rw;
-	struct io_region source;
+	struct dm_io_region source;
 
 	/*
 	 * The destinations for the transfer.
 	 */
 	unsigned int num_dests;
-	struct io_region dests[KCOPYD_MAX_REGIONS];
+	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
 
 	sector_t offset;
 	unsigned int nr_pages;
@@ -191,7 +205,7 @@ struct kcopyd_job {
 	 * Set this to ensure you are notified when the job has
 	 * completed.  'context' is for callback to use.
 	 */
-	kcopyd_notify_fn fn;
+	dm_kcopyd_notify_fn fn;
 	void *context;
 
 	/*
@@ -207,47 +221,19 @@ struct kcopyd_job {
 #define MIN_JOBS 512
 
 static struct kmem_cache *_job_cache;
-static mempool_t *_job_pool;
 
-/*
- * We maintain three lists of jobs:
- *
- * i)   jobs waiting for pages
- * ii)  jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
- *
- * All three of these are protected by job_lock.
- */
-static DEFINE_SPINLOCK(_job_lock);
-
-static LIST_HEAD(_complete_jobs);
-static LIST_HEAD(_io_jobs);
-static LIST_HEAD(_pages_jobs);
-
-static int jobs_init(void)
+int __init dm_kcopyd_init(void)
 {
 	_job_cache = KMEM_CACHE(kcopyd_job, 0);
 	if (!_job_cache)
 		return -ENOMEM;
 
-	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
-	if (!_job_pool) {
-		kmem_cache_destroy(_job_cache);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
-static void jobs_exit(void)
+void dm_kcopyd_exit(void)
 {
-	BUG_ON(!list_empty(&_complete_jobs));
-	BUG_ON(!list_empty(&_io_jobs));
-	BUG_ON(!list_empty(&_pages_jobs));
-
-	mempool_destroy(_job_pool);
 	kmem_cache_destroy(_job_cache);
-	_job_pool = NULL;
 	_job_cache = NULL;
 }
 
@@ -255,18 +241,19 @@ static void jobs_exit(void)
  * Functions to push and pop a job onto the head of a given job
  * list.
  */
-static struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs,
+			      struct dm_kcopyd_client *kc)
 {
 	struct kcopyd_job *job = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 
 	if (!list_empty(jobs)) {
 		job = list_entry(jobs->next, struct kcopyd_job, list);
 		list_del(&job->list);
 	}
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 
 	return job;
 }
@@ -274,10 +261,11 @@ static struct kcopyd_job *pop(struct list_head *jobs)
 static void push(struct list_head *jobs, struct kcopyd_job *job)
 {
 	unsigned long flags;
+	struct dm_kcopyd_client *kc = job->kc;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 	list_add_tail(&job->list, jobs);
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
 /*
@@ -294,11 +282,11 @@ static int run_complete_job(struct kcopyd_job *job)
 	void *context = job->context;
 	int read_err = job->read_err;
 	unsigned long write_err = job->write_err;
-	kcopyd_notify_fn fn = job->fn;
-	struct kcopyd_client *kc = job->kc;
+	dm_kcopyd_notify_fn fn = job->fn;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, _job_pool);
+	mempool_free(job, kc->job_pool);
 	fn(read_err, write_err, context);
 
 	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -310,6 +298,7 @@ static int run_complete_job(struct kcopyd_job *job)
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct dm_kcopyd_client *kc = job->kc;
 
 	if (error) {
 		if (job->rw == WRITE)
@@ -317,22 +306,22 @@ static void complete_io(unsigned long error, void *context)
 		else
 			job->read_err = 1;
 
-		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
-			push(&_complete_jobs, job);
-			wake();
+		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+			push(&kc->complete_jobs, job);
+			wake(kc);
 			return;
 		}
 	}
 
 	if (job->rw == WRITE)
-		push(&_complete_jobs, job);
+		push(&kc->complete_jobs, job);
 
 	else {
 		job->rw = WRITE;
-		push(&_io_jobs, job);
+		push(&kc->io_jobs, job);
 	}
 
-	wake();
+	wake(kc);
 }
 
 /*
@@ -343,7 +332,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw,
+		.bi_rw = job->rw | (1 << BIO_RW_SYNC),
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
@@ -369,7 +358,7 @@ static int run_pages_job(struct kcopyd_job *job)
 	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
 	if (!r) {
 		/* this job is ready for io */
-		push(&_io_jobs, job);
+		push(&job->kc->io_jobs, job);
 		return 0;
 	}
 
@@ -384,12 +373,13 @@ static int run_pages_job(struct kcopyd_job *job)
  * Run through a list for as long as possible.  Returns the count
  * of successful jobs.
  */
-static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
+static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+			int (*fn) (struct kcopyd_job *))
 {
 	struct kcopyd_job *job;
 	int r, count = 0;
 
-	while ((job = pop(jobs))) {
+	while ((job = pop(jobs, kc))) {
 
 		r = fn(job);
 
@@ -399,7 +389,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 				job->write_err = (unsigned long) -1L;
 			else
 				job->read_err = 1;
-			push(&_complete_jobs, job);
+			push(&kc->complete_jobs, job);
 			break;
 		}
 
@@ -421,8 +411,11 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(struct work_struct *ignored)
+static void do_work(struct work_struct *work)
 {
+	struct dm_kcopyd_client *kc = container_of(work,
+					struct dm_kcopyd_client, kcopyd_work);
+
 	/*
 	 * The order that these are called is *very* important.
 	 * complete jobs can free some pages for pages jobs.
@@ -430,9 +423,9 @@ static void do_work(struct work_struct *ignored)
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
 	 */
-	process_jobs(&_complete_jobs, run_complete_job);
-	process_jobs(&_pages_jobs, run_pages_job);
-	process_jobs(&_io_jobs, run_io_job);
+	process_jobs(&kc->complete_jobs, kc, run_complete_job);
+	process_jobs(&kc->pages_jobs, kc, run_pages_job);
+	process_jobs(&kc->io_jobs, kc, run_io_job);
 }
 
 /*
@@ -442,9 +435,10 @@ static void do_work(struct work_struct *ignored)
  */
 static void dispatch_job(struct kcopyd_job *job)
 {
-	atomic_inc(&job->kc->nr_jobs);
-	push(&_pages_jobs, job);
-	wake();
+	struct dm_kcopyd_client *kc = job->kc;
+	atomic_inc(&kc->nr_jobs);
+	push(&kc->pages_jobs, job);
+	wake(kc);
 }
 
 #define SUB_JOB_SIZE 128
@@ -469,7 +463,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 	 * Only dispatch more work if there hasn't been an error.
 	 */
 	if ((!job->read_err && !job->write_err) ||
-	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
 		/* get the next chunk of work */
 		progress = job->progress;
 		count = job->source.count - progress;
@@ -484,7 +478,8 @@ static void segment_complete(int read_err, unsigned long write_err,

 	if (count) {
 		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+							   GFP_NOIO);
 
 		*sub_job = *job;
 		sub_job->source.sector += progress;
@@ -508,7 +503,7 @@ static void segment_complete(int read_err, unsigned long write_err,
 		 * after we've completed.
 		 */
 		job->fn(read_err, write_err, job->context);
-		mempool_free(job, _job_pool);
+		mempool_free(job, job->kc->job_pool);
 	}
 }
 
@@ -526,16 +521,16 @@ static void split_job(struct kcopyd_job *job)
 		segment_complete(0, 0u, job);
 }
 
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-		unsigned int num_dests, struct io_region *dests,
-		unsigned int flags, kcopyd_notify_fn fn, void *context)
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+		   unsigned int num_dests, struct dm_io_region *dests,
+		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
 {
 	struct kcopyd_job *job;
 
 	/*
 	 * Allocate a new job.
 	 */
-	job = mempool_alloc(_job_pool, GFP_NOIO);
+	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
 	/*
 	 * set up for the read.
@@ -569,6 +564,7 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,

 	return 0;
 }
+EXPORT_SYMBOL(dm_kcopyd_copy);
 
 /*
  * Cancels a kcopyd job, eg. someone might be deactivating a
@@ -583,126 +579,76 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 #endif  /*  0  */
 
 /*-----------------------------------------------------------------
- * Unit setup
+ * Client setup
  *---------------------------------------------------------------*/
-static DEFINE_MUTEX(_client_lock);
-static LIST_HEAD(_clients);
-
-static void client_add(struct kcopyd_client *kc)
+int dm_kcopyd_client_create(unsigned int nr_pages,
+			    struct dm_kcopyd_client **result)
 {
-	mutex_lock(&_client_lock);
-	list_add(&kc->list, &_clients);
-	mutex_unlock(&_client_lock);
-}
+	int r = -ENOMEM;
+	struct dm_kcopyd_client *kc;
 
-static void client_del(struct kcopyd_client *kc)
-{
-	mutex_lock(&_client_lock);
-	list_del(&kc->list);
-	mutex_unlock(&_client_lock);
-}
-
-static DEFINE_MUTEX(kcopyd_init_lock);
-static int kcopyd_clients = 0;
-
-static int kcopyd_init(void)
-{
-	int r;
-
-	mutex_lock(&kcopyd_init_lock);
-
-	if (kcopyd_clients) {
-		/* Already initialized. */
-		kcopyd_clients++;
-		mutex_unlock(&kcopyd_init_lock);
-		return 0;
-	}
-
-	r = jobs_init();
-	if (r) {
-		mutex_unlock(&kcopyd_init_lock);
-		return r;
-	}
-
-	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
-	if (!_kcopyd_wq) {
-		jobs_exit();
-		mutex_unlock(&kcopyd_init_lock);
+	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
+	if (!kc)
 		return -ENOMEM;
-	}
 
-	kcopyd_clients++;
-	INIT_WORK(&_kcopyd_work, do_work);
-	mutex_unlock(&kcopyd_init_lock);
-	return 0;
-}
-
-static void kcopyd_exit(void)
-{
-	mutex_lock(&kcopyd_init_lock);
-	kcopyd_clients--;
-	if (!kcopyd_clients) {
-		jobs_exit();
-		destroy_workqueue(_kcopyd_wq);
-		_kcopyd_wq = NULL;
-	}
-	mutex_unlock(&kcopyd_init_lock);
-}
-
-int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
-{
-	int r = 0;
-	struct kcopyd_client *kc;
+	spin_lock_init(&kc->lock);
+	spin_lock_init(&kc->job_lock);
+	INIT_LIST_HEAD(&kc->complete_jobs);
+	INIT_LIST_HEAD(&kc->io_jobs);
+	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	r = kcopyd_init();
-	if (r)
-		return r;
+	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
+	if (!kc->job_pool)
+		goto bad_slab;
 
-	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
-	if (!kc) {
-		kcopyd_exit();
-		return -ENOMEM;
-	}
+	INIT_WORK(&kc->kcopyd_work, do_work);
+	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+	if (!kc->kcopyd_wq)
+		goto bad_workqueue;
 
-	spin_lock_init(&kc->lock);
 	kc->pages = NULL;
 	kc->nr_pages = kc->nr_free_pages = 0;
 	r = client_alloc_pages(kc, nr_pages);
-	if (r) {
-		kfree(kc);
-		kcopyd_exit();
-		return r;
-	}
+	if (r)
+		goto bad_client_pages;
 
 	kc->io_client = dm_io_client_create(nr_pages);
 	if (IS_ERR(kc->io_client)) {
 		r = PTR_ERR(kc->io_client);
-		client_free_pages(kc);
-		kfree(kc);
-		kcopyd_exit();
-		return r;
+		goto bad_io_client;
 	}
 
 	init_waitqueue_head(&kc->destroyq);
 	atomic_set(&kc->nr_jobs, 0);
 
-	client_add(kc);
 	*result = kc;
 	return 0;
+
+bad_io_client:
+	client_free_pages(kc);
+bad_client_pages:
+	destroy_workqueue(kc->kcopyd_wq);
+bad_workqueue:
+	mempool_destroy(kc->job_pool);
+bad_slab:
+	kfree(kc);
+
+	return r;
 }
+EXPORT_SYMBOL(dm_kcopyd_client_create);
 
-void kcopyd_client_destroy(struct kcopyd_client *kc)
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
 {
 	/* Wait for completion of all jobs submitted by this client. */
 	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
 
+	BUG_ON(!list_empty(&kc->complete_jobs));
+	BUG_ON(!list_empty(&kc->io_jobs));
+	BUG_ON(!list_empty(&kc->pages_jobs));
+	destroy_workqueue(kc->kcopyd_wq);
 	dm_io_client_destroy(kc->io_client);
 	client_free_pages(kc);
-	client_del(kc);
+	mempool_destroy(kc->job_pool);
 	kfree(kc);
-	kcopyd_exit();
 }
 
-EXPORT_SYMBOL(kcopyd_client_create);
-EXPORT_SYMBOL(kcopyd_client_destroy);
-EXPORT_SYMBOL(kcopyd_copy);
+EXPORT_SYMBOL(dm_kcopyd_client_destroy);
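To recap the new client API end to end: the sketch below creates a private
client, issues a single copy between two devices, and destroys the client,
which now also waits for the client's outstanding jobs. The signatures come
from the hunks above; the page reservation of 32, the completion-based waiting
and the printk error handling are illustrative assumptions.

[[
#include <linux/completion.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* dm_kcopyd_notify_fn: read_err is a flag, write_err carries
 * per-destination error bits. */
static void example_copy_done(int read_err, unsigned long write_err,
			      void *context)
{
	struct completion *done = context;

	if (read_err || write_err)
		printk(KERN_ERR "example: copy failed\n");
	complete(done);
}

/* Copy 'count' sectors from 'src' to 'dst' using a private kcopyd client. */
static int example_copy(struct block_device *src, struct block_device *dst,
			sector_t start, sector_t count)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region from, to;
	DECLARE_COMPLETION_ONSTACK(done);
	int r;

	r = dm_kcopyd_client_create(32, &kc);	/* assumed page reservation */
	if (r)
		return r;

	from.bdev = src;
	from.sector = start;
	from.count = count;

	to.bdev = dst;
	to.sector = start;
	to.count = count;

	dm_kcopyd_copy(kc, &from, 1, &to, 0, example_copy_done, &done);
	wait_for_completion(&done);

	dm_kcopyd_client_destroy(kc);
	return 0;
}
]]

Note the design choice visible in the diff: the job lists, job mempool and
workqueue all moved from file-scope globals into struct dm_kcopyd_client, so
two clients no longer contend for a single shared "kcopyd" thread.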