
Commit a5f08c1c authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* master.kernel.org:/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (29 commits)
  dm crypt: tidy pending
  dm mpath: send uevents
  dm: uevent generate events
  dm: add uevent to core
  dm: export name and uuid
  dm raid1: add mirror_set to struct mirror
  dm log: split suspend
  dm mpath: hp retry if not ready
  dm mpath: add hp handler
  dm mpath: add retry pg init
  dm crypt: tidy labels
  dm crypt: tidy whitespace
  dm crypt: add post processing queue
  dm crypt: use per device singlethread workqueues
  dm mpath: emc fix an error message
  dm: bio_list macro renaming
  dm ioctl: remove vmalloc void cast
  dm: tidy bio_io_error usage
  kcopyd use mutex instead of semaphore
  dm: use kzalloc
  ...
Parents: c1cb8e48 80fd6626
Documentation/device-mapper/dm-uevent.txt +97 −0
The device-mapper uevent code adds the capability for device-mapper to create
and send kobject uevents (uevents).  Previously, device-mapper events were only
available through the ioctl interface.  The advantage of the uevent interface
is that each event carries environment attributes that provide context for the
event, avoiding the need to query the state of the device-mapper device after
the event is received.

There are currently two functions for device-mapper uevents.  The first
function listed below creates and queues an event; the second sends the
queued event(s).

void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
                    const char *path, unsigned nr_valid_paths)

void dm_send_uevents(struct list_head *events, struct kobject *kobj)


The variables added to the uevent environment are:

Variable Name: DM_TARGET
Uevent Action(s): KOBJ_CHANGE
Type: string
Description: Name of the device-mapper target that generated the event.
Value: Target name string (e.g. "multipath").

Variable Name: DM_ACTION
Uevent Action(s): KOBJ_CHANGE
Type: string
Description: Device-mapper specific action that caused the uevent.
Value: One of:
	PATH_FAILED - A path has failed.
	PATH_REINSTATED - A path has been reinstated.

Variable Name: DM_SEQNUM
Uevent Action(s): KOBJ_CHANGE
Type: unsigned integer
Description: A sequence number for this specific device-mapper device.
Value: Valid unsigned integer range.

Variable Name: DM_PATH
Uevent Action(s): KOBJ_CHANGE
Type: string
Description: Major and minor number of the path device pertaining to this
event.
Value: Device number in the form "major:minor".

Variable Name: DM_NR_VALID_PATHS
Uevent Action(s): KOBJ_CHANGE
Type: unsigned integer
Description: Number of valid paths remaining for the device.
Value: Valid unsigned integer range.

Variable Name: DM_NAME
Uevent Action(s): KOBJ_CHANGE
Type: string
Description: Name of the device-mapper device.
Value: Name

Variable Name: DM_UUID
Uevent Action(s): KOBJ_CHANGE
Type: string
Description: UUID of the device-mapper device.
Value: UUID. (Empty string if there isn't one.)

Examples of the uevents generated, as captured by udevmonitor, are shown
below.

1.) Path failure.
UEVENT[1192521009.711215] change@/block/dm-3
ACTION=change
DEVPATH=/block/dm-3
SUBSYSTEM=block
DM_TARGET=multipath
DM_ACTION=PATH_FAILED
DM_SEQNUM=1
DM_PATH=8:32
DM_NR_VALID_PATHS=0
DM_NAME=mpath2
DM_UUID=mpath-35333333000002328
MINOR=3
MAJOR=253
SEQNUM=1130

2.) Path reinstatement.
UEVENT[1192521132.989927] change@/block/dm-3
ACTION=change
DEVPATH=/block/dm-3
SUBSYSTEM=block
DM_TARGET=multipath
DM_ACTION=PATH_REINSTATED
DM_SEQNUM=2
DM_PATH=8:32
DM_NR_VALID_PATHS=1
DM_NAME=mpath2
DM_UUID=mpath-35333333000002328
MINOR=3
MAJOR=253
SEQNUM=1131
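
For completeness, here is a hedged userspace sketch (not part of this patch
set) showing how the same events can be read directly from the kernel uevent
netlink socket that udevmonitor listens on; error handling is trimmed for
brevity:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl snl = {
		.nl_family = AF_NETLINK,
		.nl_pid = getpid(),
		.nl_groups = 1,	/* kernel uevent multicast group */
	};
	char buf[4096];
	char *p;
	ssize_t len;
	int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

	bind(s, (struct sockaddr *)&snl, sizeof(snl));
	for (;;) {
		/* Each datagram: "action@devpath\0KEY=value\0..." */
		len = recv(s, buf, sizeof(buf) - 1, 0);
		if (len <= 0)
			continue;
		buf[len] = '\0';
		for (p = buf; p < buf + len; p += strlen(p) + 1)
			if (strchr(p, '@') || !strncmp(p, "DM_", 3))
				printf("%s\n", p);
	}
}

Running this while failing or reinstating a path prints the same DM_*
keys shown in the captures above.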
drivers/md/Kconfig +12 −0
@@ -267,6 +267,12 @@ config DM_MULTIPATH_RDAC
	---help---
	  Multipath support for LSI/Engenio RDAC.

config DM_MULTIPATH_HP
	tristate "HP MSA multipath support (EXPERIMENTAL)"
	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
	---help---
	  Multipath support for HP MSA (Active/Passive) series hardware.

config DM_DELAY
	tristate "I/O delaying target (EXPERIMENTAL)"
	depends on BLK_DEV_DM && EXPERIMENTAL
@@ -276,4 +282,10 @@ config DM_DELAY

	If unsure, say N.

config DM_UEVENT
	bool "DM uevents (EXPERIMENTAL)"
	depends on BLK_DEV_DM && EXPERIMENTAL
	---help---
	Generate udev events for DM events.

endif # MD
drivers/md/Makefile +6 −0
@@ -8,6 +8,7 @@ dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
dm-snapshot-objs := dm-snap.o dm-exception-store.o
dm-mirror-objs	:= dm-log.o dm-raid1.o
dm-rdac-objs	:= dm-mpath-rdac.o
dm-hp-sw-objs	:= dm-mpath-hp-sw.o
md-mod-objs     := md.o bitmap.o
raid456-objs	:= raid5.o raid6algos.o raid6recov.o raid6tables.o \
		   raid6int1.o raid6int2.o raid6int4.o \
@@ -35,6 +36,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_EMC)	+= dm-emc.o
obj-$(CONFIG_DM_MULTIPATH_HP)	+= dm-hp-sw.o
obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rdac.o
obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o
@@ -48,6 +50,10 @@ ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec -mabi=altivec
endif

ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs			+= dm-uevent.o
endif

targets += raid6int1.c
$(obj)/raid6int1.c:   UNROLL := 1
$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
drivers/md/dm-bio-list.h +0 −5
@@ -21,11 +21,6 @@ static inline int bio_list_empty(const struct bio_list *bl)
	return bl->head == NULL;
}

#define BIO_LIST_INIT { .head = NULL, .tail = NULL }

#define BIO_LIST(bl) \
	struct bio_list bl = BIO_LIST_INIT

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
drivers/md/dm-crypt.c +102 −68
@@ -36,7 +36,6 @@ struct dm_crypt_io {
	struct work_struct work;
	atomic_t pending;
	int error;
	int post_process;
};

/*
@@ -80,6 +79,8 @@ struct crypt_config {
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
	/*
	 * crypto related data
	 */
@@ -175,6 +176,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

@@ -319,8 +321,8 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector, int write)
{
@@ -458,7 +460,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct dm_crypt_io *io, int error)
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

@@ -474,18 +476,36 @@ static void dec_pending(struct dm_crypt_io *io, int error)
}

/*
 * kcryptd:
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(_kcryptd_workqueue, &io->work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_do_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
@@ -508,13 +528,12 @@ static void crypt_endio(struct bio *clone, int error)
	}

	bio_put(clone);
	io->post_process = 1;
	kcryptd_queue_io(io);
	kcryptd_queue_crypt(io);
	return;

out:
	bio_put(clone);
	dec_pending(io, error);
	crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
@@ -544,7 +563,7 @@ static void process_read(struct dm_crypt_io *io)
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
		crypt_dec_pending(io, -ENOMEM);
		return;
	}

@@ -579,7 +598,7 @@ static void process_write(struct dm_crypt_io *io)
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			crypt_dec_pending(io, -ENOMEM);
			return;
		}

@@ -589,7 +608,7 @@ static void process_write(struct dm_crypt_io *io)
		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			dec_pending(io, -EIO);
			crypt_dec_pending(io, -EIO);
			return;
		}

@@ -624,17 +643,23 @@ static void process_read_endio(struct dm_crypt_io *io)
	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	dec_pending(io, crypt_convert(cc, &ctx));
	crypt_dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (io->post_process)
		process_read_endio(io);
	else if (bio_data_dir(io->base_bio) == READ)
	if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
}

static void kcryptd_do_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		process_read_endio(io);
	else
		process_write(io);
}
@@ -746,7 +771,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)

 	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
@@ -757,19 +782,19 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, 
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
		goto bad_cipher;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
@@ -793,12 +818,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;
		goto bad_ivmode;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
@@ -817,13 +842,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
		goto bad_slab_pool;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
@@ -834,25 +859,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
@@ -861,27 +886,45 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad5;
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	ti->private = cc;
	return 0;

bad5:
bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad4:
bad_page_pool:
	mempool_destroy(cc->io_pool);
bad3:
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
bad_ivmode:
	crypto_free_blkcipher(tfm);
bad1:
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
@@ -892,7 +935,8 @@ static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	flush_workqueue(_kcryptd_workqueue);
	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
@@ -918,9 +962,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->error = io->post_process = 0;
	io->error = 0;
	atomic_set(&io->pending, 0);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
@@ -1037,25 +1085,12 @@ static int __init dm_crypt_init(void)
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
		kmem_cache_destroy(_crypt_io_pool);
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

@@ -1066,7 +1101,6 @@ static void __exit dm_crypt_exit(void)
	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}
