Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 028867ac authored by Alasdair G Kergon, committed by Linus Torvalds
Browse files

dm: use kmem_cache macro



Use new KMEM_CACHE() macro and make the newly-exposed structure names more
meaningful.  Also remove some superfluous casts and inlines (let a modern
compiler be the judge).

Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79e15ae4
Loading
Loading
Loading
Loading
+14 −16
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@
/*
 * per bio private data
 */
struct crypt_io {
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;
@@ -106,7 +106,7 @@ struct crypt_config {

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct crypt_io *, struct bio *);
static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
@@ -382,7 +382,7 @@ static int crypt_convert(struct crypt_config *cc,

 static void dm_crypt_bio_destructor(struct bio *bio)
 {
	struct crypt_io *io = bio->bi_private;
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
@@ -393,7 +393,7 @@ static int crypt_convert(struct crypt_config *cc,
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
@@ -479,7 +479,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc,
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
static void dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

@@ -503,7 +503,7 @@ static void dec_pending(struct crypt_io *io, int error)
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);

static void kcryptd_queue_io(struct crypt_io *io)
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(_kcryptd_workqueue, &io->work);
@@ -511,7 +511,7 @@ static void kcryptd_queue_io(struct crypt_io *io)

static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
	struct crypt_io *io = clone->bi_private;
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

@@ -545,7 +545,7 @@ out:
	return error;
}

static void clone_init(struct crypt_io *io, struct bio *clone)
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

@@ -556,7 +556,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct crypt_io *io)
static void process_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
@@ -587,7 +587,7 @@ static void process_read(struct crypt_io *io)
	generic_make_request(clone);
}

static void process_write(struct crypt_io *io)
static void process_write(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
@@ -644,7 +644,7 @@ static void process_write(struct crypt_io *io)
	}
}

static void process_read_endio(struct crypt_io *io)
static void process_read_endio(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;
@@ -657,7 +657,7 @@ static void process_read_endio(struct crypt_io *io)

static void kcryptd_do_work(struct work_struct *work)
{
	struct crypt_io *io = container_of(work, struct crypt_io, work);
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (io->post_process)
		process_read_endio(io);
@@ -939,7 +939,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct crypt_io *io;
	struct dm_crypt_io *io;

	if (bio_barrier(bio))
		return -EOPNOTSUPP;
@@ -1062,9 +1062,7 @@ static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
	                                   sizeof(struct crypt_io),
	                                   0, 0, NULL, NULL);
	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

+4 −7
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ struct delay_c {
	unsigned writes;
};

struct delay_info {
struct dm_delay_info {
	struct delay_c *context;
	struct list_head list;
	struct bio *bio;
@@ -80,7 +80,7 @@ static void flush_bios(struct bio *bio)

static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
	struct delay_info *delayed, *next;
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	int start_timer = 0;
	BIO_LIST(flush_bios);
@@ -227,7 +227,7 @@ static void delay_dtr(struct dm_target *ti)

static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
	struct delay_info *delayed;
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!delay || !atomic_read(&dc->may_delay))
@@ -338,10 +338,7 @@ static int __init dm_delay_init(void)
		goto bad_queue;
	}

	delayed_cache = kmem_cache_create("dm-delay",
					  sizeof(struct delay_info),
					  __alignof__(struct delay_info),
					  0, NULL, NULL);
	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
	if (!delayed_cache) {
		DMERR("Couldn't create delayed bio cache.");
		goto bad_memcache;
+9 −8
Original line number Diff line number Diff line
@@ -127,7 +127,7 @@ struct pstore {
	struct dm_io_client *io_client;
};

static inline unsigned int sectors_to_pages(unsigned int sectors)
static unsigned sectors_to_pages(unsigned sectors)
{
	return sectors / (PAGE_SIZE >> 9);
}
@@ -393,7 +393,7 @@ static int read_exceptions(struct pstore *ps)
	return 0;
}

static inline struct pstore *get_info(struct exception_store *store)
static struct pstore *get_info(struct exception_store *store)
{
	return (struct pstore *) store->context;
}
@@ -480,7 +480,7 @@ static int persistent_read_metadata(struct exception_store *store)
}

static int persistent_prepare(struct exception_store *store,
			      struct exception *e)
			      struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
@@ -505,7 +505,7 @@ static int persistent_prepare(struct exception_store *store,
}

static void persistent_commit(struct exception_store *store,
			      struct exception *e,
			      struct dm_snap_exception *e,
			      void (*callback) (void *, int success),
			      void *callback_context)
{
@@ -616,7 +616,8 @@ static int transient_read_metadata(struct exception_store *store)
	return 0;
}

static int transient_prepare(struct exception_store *store, struct exception *e)
static int transient_prepare(struct exception_store *store,
			     struct dm_snap_exception *e)
{
	struct transient_c *tc = (struct transient_c *) store->context;
	sector_t size = get_dev_size(store->snap->cow->bdev);
@@ -631,7 +632,7 @@ static int transient_prepare(struct exception_store *store, struct exception *e)
}

static void transient_commit(struct exception_store *store,
		      struct exception *e,
			     struct dm_snap_exception *e,
			     void (*callback) (void *, int success),
			     void *callback_context)
{
+11 −12
Original line number Diff line number Diff line
@@ -83,7 +83,7 @@ struct multipath {
	struct work_struct trigger_event;

	/*
	 * We must use a mempool of mpath_io structs so that we
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;
@@ -92,7 +92,7 @@ struct multipath {
/*
 * Context information attached to each bio we process.
 */
struct mpath_io {
struct dm_mpath_io {
	struct pgpath *pgpath;
	struct dm_bio_details details;
};
@@ -122,7 +122,7 @@ static struct pgpath *alloc_pgpath(void)
	return pgpath;
}

static inline void free_pgpath(struct pgpath *pgpath)
static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}
@@ -299,8 +299,8 @@ static int __must_push_back(struct multipath *m)
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
		  unsigned was_queued)
static int map_io(struct multipath *m, struct bio *bio,
		  struct dm_mpath_io *mpio, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	unsigned long flags;
@@ -374,7 +374,7 @@ static void dispatch_queued_ios(struct multipath *m)
	int r;
	unsigned long flags;
	struct bio *bio = NULL, *next;
	struct mpath_io *mpio;
	struct dm_mpath_io *mpio;
	union map_info *info;

	spin_lock_irqsave(&m->lock, flags);
@@ -795,7 +795,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
			 union map_info *map_context)
{
	int r;
	struct mpath_io *mpio;
	struct dm_mpath_io *mpio;
	struct multipath *m = (struct multipath *) ti->private;

	if (bio_barrier(bio))
@@ -1014,7 +1014,7 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct bio *bio,
		     int error, struct mpath_io *mpio)
		     int error, struct dm_mpath_io *mpio)
{
	struct hw_handler *hwh = &m->hw_handler;
	unsigned err_flags = MP_FAIL_PATH;	/* Default behavior */
@@ -1075,8 +1075,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
static int multipath_end_io(struct dm_target *ti, struct bio *bio,
			    int error, union map_info *map_context)
{
	struct multipath *m = (struct multipath *) ti->private;
	struct mpath_io *mpio = (struct mpath_io *) map_context->ptr;
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;
@@ -1346,8 +1346,7 @@ static int __init dm_multipath_init(void)
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io),
					0, 0, NULL, NULL);
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

+50 −55
Original line number Diff line number Diff line
@@ -42,8 +42,8 @@
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct pending_exception {
	struct exception e;
struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
@@ -63,7 +63,7 @@ struct pending_exception {
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct pending_exception *primary_pe;
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
@@ -137,7 +137,7 @@ static void exit_origin_hash(void)
	kfree(_origins);
}

static inline unsigned int origin_hash(struct block_device *bdev)
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
@@ -231,7 +231,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct exception *ex, *next;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
@@ -245,18 +245,19 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
	vfree(et->table);
}

static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}

static void insert_exception(struct exception_table *eh, struct exception *e)
static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static inline void remove_exception(struct exception *e)
static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}
@@ -265,11 +266,11 @@ static inline void remove_exception(struct exception *e)
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct exception *lookup_exception(struct exception_table *et,
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct exception *e;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
@@ -279,9 +280,9 @@ static struct exception *lookup_exception(struct exception_table *et,
	return NULL;
}

static inline struct exception *alloc_exception(void)
static struct dm_snap_exception *alloc_exception(void)
{
	struct exception *e;
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
@@ -290,24 +291,24 @@ static inline struct exception *alloc_exception(void)
	return e;
}

static inline void free_exception(struct exception *e)
static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static inline struct pending_exception *alloc_pending_exception(void)
static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static inline void free_pending_exception(struct pending_exception *pe)
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct exception *e;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
@@ -334,7 +335,7 @@ static int calc_max_buckets(void)
/*
 * Rounds a number down to a power of 2.
 */
static inline uint32_t round_down(uint32_t n)
static uint32_t round_down(uint32_t n)
{
	while (n & (n - 1))
		n &= (n - 1);
@@ -384,7 +385,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static inline ulong round_up(ulong n, ulong size)
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
@@ -577,7 +578,7 @@ static void __free_exceptions(struct dm_snapshot *s)

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

@@ -655,14 +656,14 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
	dm_table_event(s->table);
}

static void get_pending_exception(struct pending_exception *pe)
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct pending_exception *pe)
static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct pending_exception *primary_pe;
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;
@@ -692,9 +693,9 @@ static struct bio *put_pending_exception(struct pending_exception *pe)
	return origin_bios;
}

static void pending_complete(struct pending_exception *pe, int success)
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct exception *e;
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
@@ -748,7 +749,8 @@ static void pending_complete(struct pending_exception *pe, int success)

static void commit_callback(void *context, int success)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

@@ -758,7 +760,7 @@ static void commit_callback(void *context, int success)
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct pending_exception *pe = (struct pending_exception *) context;
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
@@ -773,7 +775,7 @@ static void copy_callback(int read_err, unsigned int write_err, void *context)
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct pending_exception *pe)
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
@@ -803,11 +805,11 @@ static void start_copy(struct pending_exception *pe)
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct pending_exception *
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct exception *e;
	struct pending_exception *pe;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
@@ -816,7 +818,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct pending_exception, e);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

@@ -836,7 +838,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct pending_exception, e);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

@@ -860,7 +862,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
	return pe;
}

static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
@@ -871,11 +873,11 @@ static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct exception *e;
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct pending_exception *pe = NULL;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

@@ -945,7 +947,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
@@ -955,7 +957,7 @@ static void snapshot_resume(struct dm_target *ti)
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
@@ -999,8 +1001,8 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct exception *e;
	struct pending_exception *pe, *next_pe, *primary_pe = NULL;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

@@ -1147,14 +1149,14 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_barrier(bio)))
@@ -1172,7 +1174,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;
@@ -1190,7 +1192,7 @@ static void origin_resume(struct dm_target *ti)
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = (struct dm_dev *) ti->private;
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
@@ -1249,21 +1251,14 @@ static int __init dm_snapshot_init(void)
		goto bad2;
	}

	exception_cache = kmem_cache_create("dm-snapshot-ex",
					    sizeof(struct exception),
					    __alignof__(struct exception),
					    0, NULL, NULL);
	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache =
	    kmem_cache_create("dm-snapshot-in",
			      sizeof(struct pending_exception),
			      __alignof__(struct pending_exception),
			      0, NULL, NULL);
	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
Loading