Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ebb7c197 authored by Linus Torvalds
Browse files
Pull device mapper fixes from Mike Snitzer:
 "A few dm-cache fixes, an invalid ioctl handling fix for dm multipath,
  a couple immutable biovec fixups for dm mirror, and a few dm-thin
  fixes.

  There will likely be additional dm-thin metadata and data resize fixes
  to include in 3.14-rc6 next week.

  Note to stable-minded folks: Immutable biovecs were introduced in
  3.14, so the related fixups for dm mirror are not needed in stable@
  kernels"

* tag 'dm-3.14-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix truncation bug when mapping I/O to >2TB fast device
  dm thin: allow metadata space larger than supported to go unused
  dm mpath: fix stalls when handling invalid ioctls
  dm thin: fix the error path for the thin device constructor
  dm raid1: fix immutable biovec related BUG when retrying read bio
  dm io: fix I/O to multiple destinations
  dm thin: avoid metadata commit if a pool's thin devices haven't changed
  dm cache: do not add migration to completed list before unhooking bio
  dm cache: move hook_info into common portion of per_bio_data structure
parents 7aa48355 e0d849fa
Loading
Loading
Loading
Loading
+8 −5
Original line number Original line Diff line number Diff line
@@ -289,6 +289,7 @@ struct per_bio_data {
	bool tick:1;
	bool tick:1;
	unsigned req_nr:2;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_hook_info hook_info;


	/*
	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * writethrough fields.  These MUST remain at the end of this
@@ -297,7 +298,6 @@ struct per_bio_data {
	 */
	 */
	struct cache *cache;
	struct cache *cache;
	dm_cblock_t cblock;
	dm_cblock_t cblock;
	struct dm_hook_info hook_info;
	struct dm_bio_details bio_details;
	struct dm_bio_details bio_details;
};
};


@@ -671,15 +671,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
			   dm_cblock_t cblock)
{
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);


	bio->bi_bdev = cache->cache_dev->bdev;
	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
		bio->bi_iter.bi_sector =
			(from_cblock(cblock) * cache->sectors_per_block) +
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
			sector_div(bi_sector, cache->sectors_per_block);
	else
	else
		bio->bi_iter.bi_sector =
		bio->bi_iter.bi_sector =
			(from_cblock(cblock) << cache->sectors_per_block_shift) |
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
			(bi_sector & (cache->sectors_per_block - 1));
}
}


@@ -1010,13 +1011,15 @@ static void overwrite_endio(struct bio *bio, int err)
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	unsigned long flags;
	unsigned long flags;


	dm_unhook_bio(&pb->hook_info, bio);

	if (err)
	if (err)
		mg->err = true;
		mg->err = true;


	mg->requeue_holder = false;

	spin_lock_irqsave(&cache->lock, flags);
	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	list_add_tail(&mg->list, &cache->completed_migrations);
	dm_unhook_bio(&pb->hook_info, bio);
	mg->requeue_holder = false;
	spin_unlock_irqrestore(&cache->lock, flags);
	spin_unlock_irqrestore(&cache->lock, flags);


	wake_worker(cache);
	wake_worker(cache);
+11 −12
Original line number Original line Diff line number Diff line
@@ -201,29 +201,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
/*
 * Functions for getting the pages from a bvec.
 * Functions for getting the pages from a bvec.
 */
 */
static void bio_get_page(struct dpages *dp,
static void bio_get_page(struct dpages *dp, struct page **p,
		  struct page **p, unsigned long *len, unsigned *offset)
			 unsigned long *len, unsigned *offset)
{
{
	struct bio *bio = dp->context_ptr;
	struct bio_vec *bvec = dp->context_ptr;
	struct bio_vec bvec = bio_iovec(bio);
	*p = bvec->bv_page;
	*p = bvec.bv_page;
	*len = bvec->bv_len - dp->context_u;
	*len = bvec.bv_len;
	*offset = bvec->bv_offset + dp->context_u;
	*offset = bvec.bv_offset;
}
}


static void bio_next_page(struct dpages *dp)
static void bio_next_page(struct dpages *dp)
{
{
	struct bio *bio = dp->context_ptr;
	struct bio_vec *bvec = dp->context_ptr;
	struct bio_vec bvec = bio_iovec(bio);
	dp->context_ptr = bvec + 1;

	dp->context_u = 0;
	bio_advance(bio, bvec.bv_len);
}
}


static void bio_dp_init(struct dpages *dp, struct bio *bio)
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
{
	dp->get_page = bio_get_page;
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = bio;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}
}


/*
/*
+5 −2
Original line number Original line Diff line number Diff line
@@ -1626,8 +1626,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
	/*
	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	 */
	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
		r = scsi_verify_blk_ioctl(NULL, cmd);
		int err = scsi_verify_blk_ioctl(NULL, cmd);
		if (err)
			r = err;
	}


	if (r == -ENOTCONN && !fatal_signal_pending(current))
	if (r == -ENOTCONN && !fatal_signal_pending(current))
		queue_work(kmultipathd, &m->process_queued_ios);
		queue_work(kmultipathd, &m->process_queued_ios);
+3 −0
Original line number Original line Diff line number Diff line
@@ -1244,6 +1244,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)


			dm_bio_restore(bd, bio);
			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio_record->details.bi_bdev = NULL;

			atomic_inc(&bio->bi_remaining);

			queue_bio(ms, bio, rw);
			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
			return DM_ENDIO_INCOMPLETE;
		}
		}
+19 −2
Original line number Original line Diff line number Diff line
@@ -483,7 +483,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)


	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);


@@ -651,7 +651,7 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
{
{
	int r;
	int r;


	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_METADATA_CACHE_SIZE,
					  THIN_METADATA_CACHE_SIZE,
					  THIN_MAX_CONCURRENT_LOCKS);
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
	if (IS_ERR(pmd->bm)) {
@@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
	return r;
	return r;
}
}


/*
 * Return true if any thin device in this pool has been changed during
 * the current metadata transaction (i.e. some td->changed flag is set).
 * Used to skip an unnecessary metadata commit when nothing changed.
 *
 * Takes pmd->root_lock for reading while walking pmd->thin_devices.
 * NOTE(review): list_for_each_entry_safe is used although the loop
 * removes nothing — presumably for consistency with other walkers of
 * this list; confirm a plain list_for_each_entry would be equivalent.
 */
bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
	bool r = false;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->changed) {
			/* td->changed is true here, so r becomes true */
			r = td->changed;
			break;	/* one changed device is enough to answer */
		}
	}
	up_read(&pmd->root_lock);

	return r;
}

bool dm_thin_aborted_changes(struct dm_thin_device *td)
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
{
	bool r;
	bool r;
Loading