Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 93e1585e authored by Linus Torvalds
Browse files
Pull device mapper fixes from Mike Snitzer:
 "A set of device-mapper fixes for 3.13.

  A fix for possible memory corruption during DM table load, fix a
  possible leak of snapshot space in case of a crash, fix a possible
  deadlock due to a shared workqueue in the delay target, fix to
  initialize read-only module parameters that are used to export metrics
  for dm stats and dm bufio.

  Quite a few stable fixes were identified for both the thin-
  provisioning and caching targets as a result of increased regression
  testing using the device-mapper-test-suite (dmts).  The most notable
  of these are the reference counting fixes for the space map btree that
  is used by the dm-array interface -- without these the dm-cache
  metadata will leak, resulting in dm-cache devices running out of
  metadata blocks.  Also, some important fixes related to the
  thin-provisioning target's transition to read-only mode on error"

* tag 'dm-3.13-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm array: fix a reference counting bug in shadow_ablock
  dm space map: disallow decrementing a reference count below zero
  dm stats: initialize read-only module parameter
  dm bufio: initialize read-only module parameters
  dm cache: actually resize cache
  dm cache: update Documentation for invalidate_cblocks's range syntax
  dm cache policy mq: fix promotions to occur as expected
  dm thin: allow pool in read-only mode to transition to read-write mode
  dm thin: re-establish read-only state when switching to fail mode
  dm thin: always fallback the pool mode if commit fails
  dm thin: switch to read-only mode if metadata space is exhausted
  dm thin: switch to read only mode if a mapping insert fails
  dm space map metadata: return on failure in sm_metadata_new_block
  dm table: fail dm_table_create on dm_round_up overflow
  dm snapshot: avoid snapshot space leak on crash
  dm delay: fix a possible deadlock due to shared workqueue
parents 1008ebb6 ed9571f0
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -266,10 +266,12 @@ E.g.
Invalidation is removing an entry from the cache without writing it
back.  Cache blocks can be invalidated via the invalidate_cblocks
message, which takes an arbitrary number of cblock ranges.  Each cblock
must be expressed as a decimal value, in the future a variant message
that takes cblock ranges expressed in hexadecimal may be needed to
better support efficient invalidation of larger caches.  The cache must
be in passthrough mode when invalidate_cblocks is used.
range's end value is "one past the end", meaning 5-10 expresses a range
of values from 5 to 9.  Each cblock must be expressed as a decimal
value, in the future a variant message that takes cblock ranges
expressed in hexadecimal may be needed to better support efficient
invalidation of larger caches.  The cache must be in passthrough mode
when invalidate_cblocks is used.

   invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*

+5 −0
Original line number Diff line number Diff line
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

+8 −5
Original line number Diff line number Diff line
@@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	requeue_and_update_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir))
	    !should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue_and_update_tick(mq, e);
		result->op = POLICY_MISS;
	else if (!can_migrate)

	} else if (!can_migrate)
		r = -EWOULDBLOCK;
	else

	else {
		requeue_and_update_tick(mq, e);
		r = pre_cache_to_cache(mq, e, result);
	}

	return r;
}
+1 −1
Original line number Diff line number Diff line
@@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, cache->cache_size);
	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("could not resize cache metadata");
		return r;
+11 −12
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	atomic_t may_delay;
@@ -45,14 +46,13 @@ struct dm_delay_info {

static DEFINE_MUTEX(delayed_bios_lock);

static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache;

static void handle_delayed_timer(unsigned long data)
{
	struct delay_c *dc = (struct delay_c *)data;

	queue_work(kdelayd_wq, &dc->flush_expired_bios);
	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

static void queue_timeout(struct delay_c *dc, unsigned long expires)
@@ -191,6 +191,12 @@ out:
		goto bad_dev_write;
	}

	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!dc->kdelayd_wq) {
		DMERR("Couldn't start kdelayd");
		goto bad_queue;
	}

	setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);

	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@@ -203,6 +209,8 @@ out:
	ti->private = dc;
	return 0;

bad_queue:
	mempool_destroy(dc->delayed_pool);
bad_dev_write:
	if (dc->dev_write)
		dm_put_device(ti, dc->dev_write);
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	flush_workqueue(kdelayd_wq);
	destroy_workqueue(dc->kdelayd_wq);

	dm_put_device(ti, dc->dev_read);

@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
{
	int r = -ENOMEM;

	kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!kdelayd_wq) {
		DMERR("Couldn't start kdelayd");
		goto bad_queue;
	}

	delayed_cache = KMEM_CACHE(dm_delay_info, 0);
	if (!delayed_cache) {
		DMERR("Couldn't create delayed bio cache.");
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
bad_register:
	kmem_cache_destroy(delayed_cache);
bad_memcache:
	destroy_workqueue(kdelayd_wq);
bad_queue:
	return r;
}

@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
{
	dm_unregister_target(&delay_target);
	kmem_cache_destroy(delayed_cache);
	destroy_workqueue(kdelayd_wq);
}

/* Module hooks */
Loading