
Commit 5a7a662c authored by Linus Torvalds

Pull device mapper fixes from Mike Snitzer:

 - stable fix for dm-thin that avoids normal IO racing with discard

 - stable fix for a dm-cache related bug in the dm-btree walking code that
   results from using a very large fast device (e.g. 4T) with a very small
   cache block size (e.g. 32K) -- this is a very uncommon configuration

 - a couple of fixes for dm-raid (one for stable, the other addressing a
   crash in 3.18-rc1 code)

 - stable fix for dm-thinp that addresses a very rare dm-bufio bug
   having to do with memory reclamation (via shrinker) when using
   dm-thinp on top of loopback devices

 - fix a leak in dm-stripe target constructor's error path

* tag 'dm-3.18-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm btree: fix a recursion depth bug in btree walking code
  dm thin: grab a virtual cell before looking up the mapping
  dm raid: fix inaccessible superblocks causing oops in configure_discard_support
  dm raid: ensure superblock's size matches device's logical block size
  dm bufio: change __GFP_IO to __GFP_FS in shrinker callbacks
  dm stripe: fix potential for leak in stripe_ctr error path
parents 04689e74 9b460d36
drivers/md/dm-bufio.c  +6 −6

@@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c)
 
 /*
  * Test if the buffer is unused and too old, and commit it.
- * At if noio is set, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
- * different bufio client.
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
+ * rerouted to different bufio client.
  */
 static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
 				unsigned long max_jiffies)
@@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
 	if (jiffies - b->last_accessed < max_jiffies)
 		return 0;
 
-	if (!(gfp & __GFP_IO)) {
+	if (!(gfp & __GFP_FS)) {
 		if (test_bit(B_READING, &b->state) ||
 		    test_bit(B_WRITING, &b->state) ||
 		    test_bit(B_DIRTY, &b->state))
@@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long freed;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (sc->gfp_mask & __GFP_IO)
+	if (sc->gfp_mask & __GFP_FS)
 		dm_bufio_lock(c);
 	else if (!dm_bufio_trylock(c))
 		return SHRINK_STOP;
@@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long count;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (sc->gfp_mask & __GFP_IO)
+	if (sc->gfp_mask & __GFP_FS)
 		dm_bufio_lock(c);
 	else if (!dm_bufio_trylock(c))
 		return 0;
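
The distinction this fix turns on: a shrinker may only block on locks the reclaim path might already hold if the gfp mask says so, and because dm-bufio I/O can be routed through a loopback-mounted filesystem, the relevant bit is __GFP_FS rather than __GFP_IO. A minimal sketch of that shrinker shape, with illustrative names (my_cache, my_evict_old) rather than the dm-bufio code:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>
#include <linux/gfp.h>

struct my_cache {				/* illustrative client state */
	struct mutex lock;
	struct shrinker shrinker;
};

static unsigned long my_evict_old(struct my_cache *c, unsigned long nr);

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct my_cache *c = container_of(shrink, struct my_cache, shrinker);
	unsigned long freed;

	if (sc->gfp_mask & __GFP_FS)
		/* Reclaim is allowed to re-enter the filesystem: blocking is safe. */
		mutex_lock(&c->lock);
	else if (!mutex_trylock(&c->lock))
		/* Caller may hold fs locks our I/O needs; never block here. */
		return SHRINK_STOP;

	freed = my_evict_old(c, sc->nr_to_scan);
	mutex_unlock(&c->lock);
	return freed;
}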
drivers/md/dm-raid.c  +12 −5

@@ -789,8 +789,7 @@ struct dm_raid_superblock {
 	__le32 layout;
 	__le32 stripe_sectors;
 
-	__u8 pad[452];		/* Round struct to 512 bytes. */
-				/* Always set to 0 when writing. */
+	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
 } __packed;
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
 			failed_devices |= (1ULL << i);
 
-	memset(sb, 0, sizeof(*sb));
+	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
 
 	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
 	sb->features = cpu_to_le32(0);	/* No features yet */
@@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
 	uint64_t events_sb, events_refsb;
 
 	rdev->sb_start = 0;
-	rdev->sb_size = sizeof(*sb);
+	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+		DMERR("superblock size of a logical block is no longer valid");
+		return -EINVAL;
+	}
 
 	ret = read_disk_sb(rdev, rdev->sb_size);
 	if (ret)
@@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
 	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
 
 	for (i = 0; i < rs->md.raid_disks; i++) {
-		struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev);
+		struct request_queue *q;
+
+		if (!rs->dev[i].rdev.bdev)
+			continue;
+
+		q = bdev_get_queue(rs->dev[i].rdev.bdev);
 		if (!q || !blk_queue_discard(q))
 			return;
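
Two separate bugs are fixed above. The superblock hunks enforce one invariant: superblock I/O always covers a whole logical block, with everything past the struct zeroed. The memset(sb + 1, ...) works because sb + 1 points just past the struct, so only the tail of the block is cleared, while the struct's own fields are assigned explicitly afterwards. A sketch of the load-side rule in isolation (load_sb is an illustrative wrapper around the calls shown in super_load(), not a function in the driver):

/* Illustrative wrapper mirroring the validation added to super_load(). */
static int load_sb(struct md_rdev *rdev)
{
	/* Superblock reads must be sized to the device's logical block. */
	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);

	/* Too small to hold struct dm_raid_superblock, or larger than
	 * the single page backing the superblock buffer: reject it. */
	if (rdev->sb_size < sizeof(struct dm_raid_superblock) ||
	    rdev->sb_size > PAGE_SIZE)
		return -EINVAL;

	return read_disk_sb(rdev, rdev->sb_size);
}

The configure_discard_support() hunk is the oops fix from the shortlog: a device whose superblock was inaccessible has no rdev.bdev, so the loop now skips such slots instead of passing NULL to bdev_get_queue().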


drivers/md/dm-stripe.c  +3 −1

@@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		sc->stripes_shift = __ffs(stripes);
 
 	r = dm_set_target_max_io_len(ti, chunk_size);
-	if (r)
+	if (r) {
+		kfree(sc);
 		return r;
+	}
 
 	ti->num_flush_bios = stripes;
 	ti->num_discard_bios = stripes;
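
The fix frees exactly what stripe_ctr() had allocated before the failing call. For constructors with several failure points, the idiomatic kernel shape is goto-based unwinding; a sketch with a hypothetical target (foo_ctx and the 128-sector max I/O length are illustrative, not the stripe code):

struct foo_ctx {			/* hypothetical per-target state */
	unsigned int stripes;
};

static int foo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct foo_ctx *fc;
	int r;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	r = dm_set_target_max_io_len(ti, 128);	/* sectors, illustrative */
	if (r)
		goto bad;	/* every failure after the allocation unwinds it */

	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}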
drivers/md/dm-thin.c  +12 −4

@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
+	/*
+	 * We must hold the virtual cell before doing the lookup, otherwise
+	 * there's a race with discard.
+	 */
+	build_virtual_key(tc->td, block, &key);
+	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+		return DM_MAPIO_SUBMITTED;
+
 	r = dm_thin_find_block(td, block, 0, &result);
 
 	/*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 			 * shared flag will be set in their case.
 			 */
 			thin_defer_bio(tc, bio);
+			cell_defer_no_holder_no_free(tc, &cell1);
 			return DM_MAPIO_SUBMITTED;
 		}
 
-		build_virtual_key(tc->td, block, &key);
-		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
-			return DM_MAPIO_SUBMITTED;
-
 		build_data_key(tc->td, result.block, &key);
 		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
 			cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 			 * of doing so.
 			 */
 			handle_unserviceable_bio(tc->pool, bio);
+			cell_defer_no_holder_no_free(tc, &cell1);
 			return DM_MAPIO_SUBMITTED;
 		}
 		/* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		 * provide the hint to load the metadata into cache.
 		 */
 		thin_defer_bio(tc, bio);
+		cell_defer_no_holder_no_free(tc, &cell1);
 		return DM_MAPIO_SUBMITTED;
 
 	default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		 * pool is switched to fail-io mode.
 		 */
 		bio_io_error(bio);
+		cell_defer_no_holder_no_free(tc, &cell1);
 		return DM_MAPIO_SUBMITTED;
 	}
 }
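
Why the detain must precede the lookup: between dm_thin_find_block() and taking the virtual cell there was a window in which a concurrent discard could invalidate the mapping that was just read. An illustrative interleaving, using the names from the hunks above:

/*
 * The window the fix closes (illustrative interleaving):
 *
 *   thin_bio_map()                           discard path
 *   --------------                           ------------
 *   r = dm_thin_find_block(td, block, ...)
 *                                            detains the virtual cell
 *                                            unmaps 'block', frees the
 *                                            data block it pointed to
 *   build_virtual_key(); dm_bio_detain()     <- succeeds, but 'result'
 *   remap bio using stale 'result'              now names a freed block
 *
 * Detaining the virtual cell before the lookup serialises the reader
 * against the discard, so the mapping cannot change under the lookup.
 * That is also why every early return after the detain has to release
 * the cell via cell_defer_no_holder_no_free(tc, &cell1).
 */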
drivers/md/persistent-data/dm-btree-internal.h  +6 −0

@@ -42,6 +42,12 @@ struct btree_node {
 } __packed;
 
 
+/*
+ * Locks a block using the btree node validator.
+ */
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+		 struct dm_block **result);
+
 void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
 		  struct dm_btree_value_type *vt);
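
bn_read_lock() is exposed through this header so the walking code can take a read lock per btree level itself, instead of relying on a spine structure that keeps only a bounded number of nodes locked. A rough sketch of a walk built on it, using the INTERNAL_NODE flag and the value64()/value_ptr() accessors from this same header (the callback type is simplified, and the real walk also validates nodes and value types, so treat this as an outline rather than the actual fix):

/* Sketch: each recursion level holds its own read lock on its node. */
static int walk_node(struct dm_btree_info *info, dm_block_t block,
		     int (*fn)(void *context, void *leaf_value),
		     void *context)
{
	int r;
	unsigned i;
	struct dm_block *node;
	struct btree_node *n;

	r = bn_read_lock(info, block, &node);	/* lock this level */
	if (r)
		return r;

	n = dm_block_data(node);
	for (i = 0; i < le32_to_cpu(n->header.nr_entries); i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
			/* Recurse with this node still locked; depth is
			 * no longer capped by a fixed-size spine. */
			r = walk_node(info, value64(n, i), fn, context);
		else
			r = fn(context, value_ptr(n, i));
		if (r)
			break;
	}

	dm_tm_unlock(info->tm, node);
	return r;
}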

