
Commit b91593fa authored by Linus Torvalds
Pull device mapper updates from Mike Snitzer:

 - a few conversions from atomic_t to refcount_t (a brief userspace
   sketch of the refcount pattern follows this list)

 - a DM core fix for a race during device destruction that could result
   in a BUG_ON

 - a stable@ fix for a DM cache race condition that could lead to data
   corruption when operating in writeback mode (writethrough is the
   default)

 - various DM cache cleanups and improvements

 - add DAX support to the DM log-writes target

 - a fix for the DM zoned target's ability to deal with the last zone of
   the drive being smaller than all others

 - a stable@ DM crypt and DM integrity fix for a negative check that was
   too restrictive (prevented slab debug with XFS on top of DM crypt
   from working)

 - a DM raid target fix for a panic that can occur when forcing a raid
   to sync
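
The refcount_t conversions follow the usual kernel pattern, visible in the dm-cache-metadata.c hunk below: refcount_set() at creation, refcount_inc() when lookup() hands out another reference, refcount_dec_and_test() on close. As a rough single-threaded userspace sketch of why refcount_t is preferred over a bare atomic_t counter (the real type is atomic and saturates on overflow; everything prefixed sketch_ here is illustrative, not kernel API):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Editor's sketch, not kernel code: a saturating reference count.
 * Unlike a plain counter, it refuses to wrap past UINT_MAX and
 * refuses to resurrect an object whose count already hit zero. */
struct sketch_refcount { unsigned int val; };

static void sketch_refcount_set(struct sketch_refcount *r, unsigned int n)
{
	r->val = n;
}

static void sketch_refcount_inc(struct sketch_refcount *r)
{
	assert(r->val != 0);		/* inc-from-zero: use-after-free */
	if (r->val != UINT_MAX)		/* saturate instead of wrapping */
		r->val++;
}

static bool sketch_refcount_dec_and_test(struct sketch_refcount *r)
{
	assert(r->val != 0);		/* underflow: double put */
	if (r->val == UINT_MAX)		/* saturated objects are leaked */
		return false;
	return --r->val == 0;
}

int main(void)
{
	struct sketch_refcount rc;

	sketch_refcount_set(&rc, 1);				/* metadata_open() */
	sketch_refcount_inc(&rc);				/* lookup() */
	printf("%d\n", sketch_refcount_dec_and_test(&rc));	/* 0: still held */
	printf("%d\n", sketch_refcount_dec_and_test(&rc));	/* 1: last put */
	return 0;
}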

* tag 'for-4.15/dm' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (25 commits)
  dm cache: lift common migration preparation code to alloc_migration()
  dm cache: remove unused deferred_cells member from struct cache
  dm cache policy smq: allocate cache blocks in order
  dm cache policy smq: change max background work from 10240 to 4096 blocks
  dm cache background tracker: limit amount of background work that may be issued at once
  dm cache policy smq: take origin idle status into account when queuing writebacks
  dm cache policy smq: handle races with queuing background_work
  dm raid: fix panic when attempting to force a raid to sync
  dm integrity: allow unaligned bv_offset
  dm crypt: allow unaligned bv_offset
  dm: small cleanup in dm_get_md()
  dm: fix race between dm_get_from_kobject() and __dm_destroy()
  dm: allocate struct mapped_device with kvzalloc
  dm zoned: ignore last smaller runt zone
  dm space map metadata: use ARRAY_SIZE
  dm log writes: add support for DAX
  dm log writes: add support for inline data buffers
  dm cache: simplify get_per_bio_data() by removing data_size argument
  dm cache: remove all obsolete writethrough-specific code
  dm cache: submit writethrough writes in parallel to origin and cache
  ...
parents e2c5923c ef7afb36
drivers/md/dm-cache-background-tracker.c (+12 −6)
@@ -161,8 +161,17 @@ EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);

 static bool max_work_reached(struct background_tracker *b)
 {
-	// FIXME: finish
-	return false;
+	return atomic_read(&b->pending_promotes) +
+		atomic_read(&b->pending_writebacks) +
+		atomic_read(&b->pending_demotes) >= b->max_work;
+}
+
+struct bt_work *alloc_work(struct background_tracker *b)
+{
+	if (max_work_reached(b))
+		return NULL;
+
+	return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
 }
 
 int btracker_queue(struct background_tracker *b,
@@ -174,10 +183,7 @@ int btracker_queue(struct background_tracker *b,
 	if (pwork)
 		*pwork = NULL;
 
-	if (max_work_reached(b))
-		return -ENOMEM;
-
-	w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
+	w = alloc_work(b);
 	if (!w)
 		return -ENOMEM;
 
drivers/md/dm-cache-metadata.c (+5 −4)
@@ -13,6 +13,7 @@
 #include "persistent-data/dm-transaction-manager.h"
 
 #include <linux/device-mapper.h>
+#include <linux/refcount.h>
 
 /*----------------------------------------------------------------*/
 
@@ -100,7 +101,7 @@ struct cache_disk_superblock {
 } __packed;
 
 struct dm_cache_metadata {
-	atomic_t ref_count;
+	refcount_t ref_count;
 	struct list_head list;
 
 	unsigned version;
@@ -753,7 +754,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
 	}
 
 	cmd->version = metadata_version;
-	atomic_set(&cmd->ref_count, 1);
+	refcount_set(&cmd->ref_count, 1);
 	init_rwsem(&cmd->root_lock);
 	cmd->bdev = bdev;
 	cmd->data_block_size = data_block_size;
@@ -791,7 +792,7 @@ static struct dm_cache_metadata *lookup(struct block_device *bdev)
 
 	list_for_each_entry(cmd, &table, list)
 		if (cmd->bdev == bdev) {
-			atomic_inc(&cmd->ref_count);
+			refcount_inc(&cmd->ref_count);
 			return cmd;
 		}
 
@@ -862,7 +863,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 
 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
 {
-	if (atomic_dec_and_test(&cmd->ref_count)) {
+	if (refcount_dec_and_test(&cmd->ref_count)) {
 		mutex_lock(&table_lock);
 		list_del(&cmd->list);
 		mutex_unlock(&table_lock);
drivers/md/dm-cache-policy-smq.c (+33 −9)
@@ -213,6 +213,19 @@ static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
 		l->nr_elts--;
 }
 
+static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
+{
+	struct entry *e;
+
+	for (e = l_head(es, l); e; e = l_next(es, e))
+		if (!e->sentinel) {
+			l_del(es, l, e);
+			return e;
+		}
+
+	return NULL;
+}
+
 static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
 {
 	struct entry *e;
@@ -719,7 +732,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
 	if (l_empty(&ea->free))
 		return NULL;
 
-	e = l_pop_tail(ea->es, &ea->free);
+	e = l_pop_head(ea->es, &ea->free);
 	init_entry(e);
 	ea->nr_allocated++;
 
@@ -1158,13 +1171,13 @@ static void clear_pending(struct smq_policy *mq, struct entry *e)
 	e->pending_work = false;
 }
 
-static void queue_writeback(struct smq_policy *mq)
+static void queue_writeback(struct smq_policy *mq, bool idle)
 {
 	int r;
 	struct policy_work work;
 	struct entry *e;
 
-	e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed);
+	e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
 	if (e) {
 		mark_pending(mq, e);
 		q_del(&mq->dirty, e);
@@ -1174,12 +1187,16 @@
 		work.cblock = infer_cblock(mq, e);
 
 		r = btracker_queue(mq->bg_work, &work, NULL);
-		WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
+		if (r) {
+			clear_pending(mq, e);
+			q_push_front(&mq->dirty, e);
+		}
 	}
 }
 
 static void queue_demotion(struct smq_policy *mq)
 {
+	int r;
 	struct policy_work work;
 	struct entry *e;
 
@@ -1189,7 +1206,7 @@ static void queue_demotion(struct smq_policy *mq)
 	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 	if (!e) {
 		if (!clean_target_met(mq, true))
-			queue_writeback(mq);
+			queue_writeback(mq, false);
 		return;
 	}
 
@@ -1199,12 +1216,17 @@
 	work.op = POLICY_DEMOTE;
 	work.oblock = e->oblock;
 	work.cblock = infer_cblock(mq, e);
-	btracker_queue(mq->bg_work, &work, NULL);
+	r = btracker_queue(mq->bg_work, &work, NULL);
+	if (r) {
+		clear_pending(mq, e);
+		q_push_front(&mq->clean, e);
+	}
 }
 
 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 			    struct policy_work **workp)
 {
+	int r;
 	struct entry *e;
 	struct policy_work work;
 
@@ -1234,7 +1256,9 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 	work.op = POLICY_PROMOTE;
 	work.oblock = oblock;
 	work.cblock = infer_cblock(mq, e);
-	btracker_queue(mq->bg_work, &work, workp);
+	r = btracker_queue(mq->bg_work, &work, workp);
+	if (r)
+		free_entry(&mq->cache_alloc, e);
 }
 
 /*----------------------------------------------------------------*/
@@ -1418,7 +1442,7 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
 		if (!clean_target_met(mq, idle)) {
-			queue_writeback(mq);
+			queue_writeback(mq, idle);
 			r = btracker_issue(mq->bg_work, result);
 		}
 	}
@@ -1778,7 +1802,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
 	mq->next_hotspot_period = jiffies;
 	mq->next_cache_period = jiffies;
 
-	mq->bg_work = btracker_create(10240); /* FIXME: hard coded value */
+	mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
 	if (!mq->bg_work)
 		goto bad_btracker;
 
One more file changed (+132 −194); its diff is collapsed because the preview size limit was exceeded.

drivers/md/dm-core.h (+2 −1)
@@ -29,7 +29,6 @@ struct dm_kobject_holder {
  * DM targets must _not_ deference a mapped_device to directly access its members!
  */
 struct mapped_device {
-	struct srcu_struct io_barrier;
 	struct mutex suspend_lock;
 
 	/*
@@ -127,6 +126,8 @@ struct mapped_device {
 	struct blk_mq_tag_set *tag_set;
 	bool use_blk_mq:1;
 	bool init_tio_pdu:1;
+
+	struct srcu_struct io_barrier;
 };
 
 void dm_init_md_queue(struct mapped_device *md);
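
This dm-core.h hunk moves srcu_struct, whose size grows with the number of possible CPUs, to the end of struct mapped_device, presumably so frequently used members keep small offsets; it accompanies the shortlog's "dm: allocate struct mapped_device with kvzalloc", which lets the now-large allocation fall back to vmalloc() when contiguous memory is fragmented. A userspace stand-in for that try-cheap-then-fall-back control flow (kvzalloc()/kvfree() are real kernel APIs; the calloc()-based stand-ins below model only the decision logic):

#include <stdio.h>
#include <stdlib.h>

/* Editor's sketch, not kernel code: kvzalloc()-style control flow.
 * calloc() plays both allocators, so only the fallback logic is real. */
static void *sketch_kvzalloc(size_t size)
{
	/* pretend the "kmalloc" path only serves small requests */
	void *p = size <= 4096 ? calloc(1, size) : NULL;

	if (!p)
		p = calloc(1, size);	/* "vmalloc" fallback */
	return p;
}

int main(void)
{
	/* struct mapped_device got big once srcu_struct was embedded */
	void *md = sketch_kvzalloc(64 * 1024);

	printf("allocation %s\n", md ? "succeeded" : "failed");
	free(md);
	return 0;
}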