
Commit adeba81a authored by Linus Torvalds

Merge tag 'for-4.15/dm-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull more device mapper updates from Mike Snitzer:
 "Given your expected travel I figured I'd get these fixes to you sooner
  rather than later.

   - a DM multipath stable@ fix to silence an annoying error message
     that isn't _really_ an error

   - a DM core stable@ fix for discard support that was enabled for an
     entire DM device despite only having partial support for discards
     due to a mix of discard capabilities across the underlying devices.

   - a couple other DM core discard fixes.

   - a DM bufio stable@ fix that resolves a 32-bit overflow"

* tag 'for-4.15/dm-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm bufio: fix integer overflow when limiting maximum cache size
  dm: clear all discard attributes in queue_limits when discards are disabled
  dm: do not set 'discards_supported' in targets that do not need it
  dm: discard support requires all targets in a table support discards
  dm mpath: remove annoying message of 'blk_get_request() returned -11'
parents 854ac870 74d4108d
drivers/md/dm-bufio.c  +6 −9
@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
-	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+	*threshold_buffers = mult_frac(buffers,
+				       DM_BUFIO_WRITEBACK_PERCENT, 100);
}

/*
@@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

-	mem = (__u64)((totalram_pages - totalhigh_pages) *
-		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
-	/*
-	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
-	 * in fs/proc/internal.h
-	 */
-	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
-		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;
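
For context on the dm-bufio hunks above: with a 32-bit unsigned long, computing 'x * PERCENT / 100' forms the full product before dividing, so the intermediate can wrap even when both x and the final result fit; mult_frac() divides first and only multiplies the quotient and the small remainder, so nothing briefly grows a hundred times larger than the result. A small userspace sketch of the idea (illustration only; the 75 and the sample size are arbitrary values, not the driver's actual constants):

#include <stdio.h>

/* Same shape as the kernel's mult_frac() helper: divide first, then
 * multiply, so the intermediates stay close to the final result instead
 * of temporarily being ~100x larger the way x * percent is. */
static unsigned long mult_frac_sketch(unsigned long x, unsigned long numer,
				      unsigned long denom)
{
	unsigned long quot = x / denom;
	unsigned long rem  = x % denom;

	return quot * numer + rem * numer / denom;
}

int main(void)
{
	/* ~3.6 GB: fits in a 32-bit unsigned long, but 75 times it does not. */
	unsigned long cache_bytes = 3900000000UL;

	/* Build with -m32 to see the naive form wrap; on 64-bit both agree. */
	printf("naive:     %lu\n", cache_bytes * 75UL / 100UL);
	printf("mult_frac: %lu\n", mult_frac_sketch(cache_bytes, 75UL, 100UL));
	return 0;
}
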
drivers/md/dm-era-target.c  +0 −1
@@ -1513,7 +1513,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
-	ti->discards_supported = true;
	era->callbacks.congested_fn = era_is_congested;
	dm_table_add_target_callbacks(ti->table, &era->callbacks);

drivers/md/dm-mpath.c  +0 −2
@@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		bool queue_dying = blk_queue_dying(q);
-		DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
-			    PTR_ERR(clone), queue_dying ? " (path offline)" : "");
		if (queue_dying) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
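
On the dm-mpath hunk above: blk_get_request() reports failure through the ERR_PTR() encoding, and -11 is -EAGAIN on Linux, i.e. "no request available right now, try again", which is exactly the case the surrounding code already handles by requeuing, so the removed message was noise rather than a real error. A simplified userspace model of the err.h helpers involved (illustration only):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Pack a small negative errno value into a pointer, as the kernel does. */
static void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno value from such a pointer. */
static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* A pointer in the top MAX_ERRNO range of the address space is an error. */
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* Stand-in for a blk_get_request() failure under memory pressure. */
	void *clone = ERR_PTR(-EAGAIN);

	if (IS_ERR(clone))
		printf("blk_get_request() returned %ld - requeuing\n",
		       PTR_ERR(clone));
	return 0;
}
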
drivers/md/dm-raid.c  +0 −6
@@ -2887,9 +2887,6 @@ static void configure_discard_support(struct raid_set *rs)
	bool raid456;
	struct dm_target *ti = rs->ti;

-	/* Assume discards not supported until after checks below. */
-	ti->discards_supported = false;

	/*
	 * XXX: RAID level 4,5,6 require zeroing for safety.
	 */
@@ -2914,9 +2911,6 @@ static void configure_discard_support(struct raid_set *rs)
		}
	}

-	/* All RAID members properly support discards */
-	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
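
The dm-era and dm-raid removals above go hand in hand with the dm-table.c change that follows: once dm_table_supports_discards() checks every target and every underlying data device, 'discards_supported' only needs to be set by targets that implement discard support themselves even when their data devices do not advertise it; simple pass-through targets just keep num_discard_bios set and rely on the per-device check.
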
drivers/md/dm-table.c  +22 −21
@@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
	return true;
}


-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
+static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+				      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

-	return q && blk_queue_discard(q);
+	return q && !blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
@@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
	struct dm_target *ti;
	unsigned i;

-	/*
-	 * Unless any target used by the table set discards_supported,
-	 * require at least one underlying device to support discards.
-	 * t->devices includes internal dm devices such as mirror logs
-	 * so we need to use iterate_devices here, which targets
-	 * supporting discard selectively must provide.
-	 */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->num_discard_bios)
-			continue;
-
-		if (ti->discards_supported)
-			return true;
+			return false;

-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
-			return true;
+		/*
+		 * Either the target provides discard support (as implied by setting
+		 * 'discards_supported') or it relies on _all_ data devices having
+		 * discard support.
+		 */
+		if (!ti->discards_supported &&
+		    (!ti->type->iterate_devices ||
+		     ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
+			return false;
	}

-	return false;
+	return true;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1806,9 +1801,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
	 */
	q->limits = *limits;

-	if (!dm_table_supports_discards(t))
+	if (!dm_table_supports_discards(t)) {
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
-	else
+		/* Must also clear discard limits... */
+		q->limits.max_discard_sectors = 0;
+		q->limits.max_hw_discard_sectors = 0;
+		q->limits.discard_granularity = 0;
+		q->limits.discard_alignment = 0;
+		q->limits.discard_misaligned = 0;
+	} else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
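
Taken together, the dm-table.c hunks flip the discard policy from "enable discards if any target in the table can use them" to "enable discards only if every target, and every data device under it, supports them", and when the answer is no they also zero the discard fields in queue_limits, since the earlier 'q->limits = *limits' copy would otherwise leave stale discard limits visible even with QUEUE_FLAG_DISCARD cleared. A rough userspace model of the old versus new policy (illustration only; the types and field names below are made up, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of one dm target's discard situation. */
struct toy_target {
	bool discards_supported;	/* target implements discards itself */
	bool any_dev_discard;		/* at least one data device can discard */
	bool all_devs_discard;		/* every data device can discard */
	unsigned num_discard_bios;
};

/* Old policy: one capable target was enough to enable discards. */
static bool old_table_supports_discards(const struct toy_target *t, int n)
{
	for (int i = 0; i < n; i++) {
		if (!t[i].num_discard_bios)
			continue;
		if (t[i].discards_supported || t[i].any_dev_discard)
			return true;
	}
	return false;
}

/* New policy: every target must take discards, and each must either
 * implement them itself or have all of its data devices support them. */
static bool new_table_supports_discards(const struct toy_target *t, int n)
{
	for (int i = 0; i < n; i++) {
		if (!t[i].num_discard_bios)
			return false;
		if (!t[i].discards_supported && !t[i].all_devs_discard)
			return false;
	}
	return true;
}

int main(void)
{
	/* One target spanning an SSD (discard-capable) and an HDD (not). */
	struct toy_target table[] = {
		{ .num_discard_bios = 1, .any_dev_discard = true },
	};

	printf("old policy enables discards: %d\n",
	       old_table_supports_discards(table, 1));
	printf("new policy enables discards: %d\n",
	       new_table_supports_discards(table, 1));
	return 0;
}
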