
Commit 17eb2c3b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm crypt: use bio_add_page
  dm: merge max_hw_sector
  dm: trigger change uevent on rename
  dm crypt: fix write endio
  dm mpath: hp requires scsi
  dm: table detect io beyond device
parents 3c378158 91e10625
drivers/md/Kconfig +1 −1
@@ -269,7 +269,7 @@ config DM_MULTIPATH_RDAC
 
 config DM_MULTIPATH_HP
 	tristate "HP MSA multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
 	---help---
 	  Multipath support for HP MSA (Active/Passive) series hardware.
 
drivers/md/dm-crypt.c +15 −16
@@ -398,7 +398,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned int i;
+	unsigned i, len;
+	struct page *page;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
@@ -407,10 +408,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	clone_init(io, clone);
 
 	for (i = 0; i < nr_iovecs; i++) {
-		struct bio_vec *bv = bio_iovec_idx(clone, i);
-
-		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!bv->bv_page)
+		page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!page)
 			break;
 
 		/*
@@ -421,15 +420,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-		bv->bv_offset = 0;
-		if (size > PAGE_SIZE)
-			bv->bv_len = PAGE_SIZE;
-		else
-			bv->bv_len = size;
+		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+		if (!bio_add_page(clone, page, len, 0)) {
+			mempool_free(page, cc->page_pool);
+			break;
+		}
 
-		clone->bi_size += bv->bv_len;
-		clone->bi_vcnt++;
-		size -= bv->bv_len;
+		size -= len;
 	}
 
 	if (!clone->bi_size) {
@@ -511,6 +509,9 @@ static void crypt_endio(struct bio *clone, int error)
 	struct crypt_config *cc = io->target->private;
 	unsigned read_io = bio_data_dir(clone) == READ;
 
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
+		error = -EIO;
+
 	/*
 	 * free the processed pages
 	 */
@@ -519,10 +520,8 @@ static void crypt_endio(struct bio *clone, int error)
 		goto out;
 	}
 
-	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
-		error = -EIO;
+	if (unlikely(error))
 		goto out;
-	}
 
 	bio_put(clone);
 	kcryptd_queue_crypt(io);
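
Two dm-crypt fixes land above. crypt_alloc_buffer() no longer fills bio_vec entries by hand: bio_add_page() maintains bi_vcnt and bi_size and rejects a page it cannot append, in which case the mempool page is freed and the loop exits cleanly. And crypt_endio() latches a missing BIO_UPTODATE flag into -EIO before the write pages are freed, so write errors are reported instead of lost. A minimal sketch of the allocation pattern the code moves to, with invented names (my_alloc_buffer, pool, bs) standing in for the dm-crypt specifics:

/* Illustrative sketch only; not the exact tree contents. */
static struct bio *my_alloc_buffer(struct bio_set *bs, mempool_t *pool,
				   unsigned size)
{
	unsigned nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	struct bio *clone;
	struct page *page;
	unsigned i, len;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, bs);
	if (!clone)
		return NULL;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(pool, gfp_mask);
		if (!page)
			break;

		len = min_t(unsigned, size, PAGE_SIZE);

		/*
		 * bio_add_page() updates bi_vcnt/bi_size itself and
		 * returns 0 if the page cannot be appended, so no
		 * bio_vec fields are touched directly.
		 */
		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, pool);
			break;
		}
		size -= len;
	}
	return clone;
}

Letting bio_add_page() do the bookkeeping also keeps the bio consistent with the block layer's own accounting rather than relying on hand-maintained fields.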
drivers/md/dm-ioctl.c +5 −7
@@ -332,6 +332,8 @@ static int dm_hash_rename(const char *old, const char *new)
 		dm_table_put(table);
 	}
 
+	dm_kobject_uevent(hc->md);
+
 	dm_put(hc->md);
 	up_write(&_hash_lock);
 	kfree(old_name);
@@ -1250,21 +1252,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (!table)
 		goto out_argv;
 
-	if (tmsg->sector >= dm_table_get_size(table)) {
+	ti = dm_table_find_target(table, tmsg->sector);
+	if (!dm_target_is_valid(ti)) {
 		DMWARN("Target message sector outside device.");
 		r = -EINVAL;
-		goto out_table;
-	}
-
-	ti = dm_table_find_target(table, tmsg->sector);
-	if (ti->type->message)
+	} else if (ti->type->message)
 		r = ti->type->message(ti, argc, argv);
 	else {
 		DMWARN("Target type does not support messages");
 		r = -EINVAL;
 	}
 
- out_table:
 	dm_table_put(table);
  out_argv:
 	kfree(argv);
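
target_message() used to range-check tmsg->sector against dm_table_get_size() before the lookup; it now looks the target up first and validates the result, which also lets the out_table label go away. This relies on alloc_targets() (see the dm-table.c diff below) appending one zero-filled sentinel entry that out-of-range lookups return. The dm_target_is_valid() helper is added in drivers/md/dm.h and is not visible in this merge view; it amounts to testing the back-pointer that only real targets have set, roughly:

/*
 * Sketch of the helper this series adds to drivers/md/dm.h (not shown
 * in this merge view). The sentinel appended by alloc_targets() is
 * zero-filled, so its ->table pointer is NULL and the test fails.
 */
#define dm_target_is_valid(t) ((t)->table)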
drivers/md/dm-table.c +15 −1
@@ -99,6 +99,9 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
 
+	lhs->max_hw_sectors =
+		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
+
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
@@ -189,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
 	/*
 	 * Allocate both the target array and offset array at once.
+	 * Append an empty entry to catch sectors beyond the end of
+	 * the device.
 	 */
-	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
 					  sizeof(sector_t));
 	if (!n_highs)
 		return -ENOMEM;
@@ -564,6 +569,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
 
+	rs->max_hw_sectors =
+		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
@@ -701,6 +709,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
 		rs->max_sectors = SAFE_MAX_SECTORS;
+	if (!rs->max_hw_sectors)
+		rs->max_hw_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
@@ -867,6 +877,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
@@ -896,6 +909,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
+	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
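
The new max_hw_sectors limit is merged with min_not_zero() wherever restrictions are combined, then copied to the request queue in dm_table_set_restrictions(). min_not_zero(), a pre-existing helper local to dm-table.c at this point in kernel history, picks the smaller of two limits while treating zero as "unset", so a device that never declared a hardware limit cannot clamp the stacked table's limit to zero. Its semantics, sketched:

/*
 * Sketch of the min_not_zero() semantics relied on above (the macro
 * itself predates this patch): combining an unset limit (0) with 128
 * yields 128; combining 64 with 128 yields 64.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))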
drivers/md/dm.c +24 −7
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+	struct dm_target *ti;
+	sector_t len = 0, max;
 	struct dm_target_io *tio;
 
+	ti = dm_table_find_target(ci->map, ci->sector);
+	if (!dm_target_is_valid(ti))
+		return -EIO;
+
+	max = max_io_len(ci->md, ci->sector, ti);
+
 	/*
 	 * Allocate a target io object.
 	 */
@@ -736,6 +742,9 @@ static void __clone_and_map(struct clone_info *ci)
 		do {
 			if (offset) {
 				ti = dm_table_find_target(ci->map, ci->sector);
+				if (!dm_target_is_valid(ti))
+					return -EIO;
+
 				max = max_io_len(ci->md, ci->sector, ti);
 
 				tio = alloc_tio(ci->md);
@@ -759,6 +768,8 @@ static void __clone_and_map(struct clone_info *ci)
 
 		ci->idx++;
 	}
+
+	return 0;
 }
 
 /*
@@ -767,6 +778,7 @@ static void __clone_and_map(struct clone_info *ci)
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;
+	int error = 0;
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.idx = bio->bi_idx;
 
 	start_io_acct(ci.io);
-	while (ci.sector_count)
-		__clone_and_map(&ci);
+	while (ci.sector_count && !error)
+		error = __clone_and_map(&ci);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, 0);
+	dec_pending(ci.io, error);
 	dm_table_put(ci.map);
 
 	return 0;
@@ -1502,7 +1514,7 @@ int dm_resume(struct mapped_device *md)
 
 	dm_table_unplug_all(map);
 
-	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
+	dm_kobject_uevent(md);
 
 	r = 0;
 
@@ -1516,6 +1528,11 @@ out:
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
+void dm_kobject_uevent(struct mapped_device *md)
+{
+	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
+}
+
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
 {
 	return atomic_add_return(1, &md->uevent_seq);
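
Two threads of the merge come together in this file. __clone_and_map() now returns an error when a sector maps past the last target, and __split_bio() stops cloning on the first failure and hands the error to dec_pending(), so the original bio completes with -EIO instead of being mapped onto the zero-filled sentinel. Separately, the KOBJ_CHANGE notification moves into dm_kobject_uevent(), which dm_hash_rename() (in the dm-ioctl.c diff above) now calls as well, so udev sees a change event when a device is renamed. Condensed from the diff, the error-propagation shape is:

	int error = 0;

	/* stop submitting clones on the first error */
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* the completion path reports the first error, if any */
	dec_pending(ci.io, error);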