Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ec667158 authored by Linus Torvalds
Browse files
Pull device-mapper updates from Alasdair Kergon:
 "Allow devices that hold metadata for the device-mapper thin
  provisioning target to be extended easily; allow WRITE SAME on
  multipath devices; an assortment of little fixes and clean-ups."

* tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (21 commits)
  dm cache: set config value
  dm cache: move config fns
  dm thin: generate event when metadata threshold passed
  dm persistent metadata: add space map threshold callback
  dm persistent data: add threshold callback to space map
  dm thin: detect metadata device resizing
  dm persistent data: support space map resizing
  dm thin: open dev read only when possible
  dm thin: refactor data dev resize
  dm cache: replace memcpy with struct assignment
  dm cache: fix typos in comments
  dm cache policy: fix description of lookup fn
  dm: document iterate_devices
  dm persistent data: fix error message typos
  dm cache: tune migration throttling
  dm mpath: enable WRITE SAME support
  dm table: fix write same support
  dm bufio: avoid a possible __vmalloc deadlock
  dm snapshot: fix error return code in snapshot_ctr
  dm cache: fix error return code in cache_create
  ...
parents f755407d 2f14f4b5
Loading
Loading
Loading
Loading
+23 −1
Original line number Original line Diff line number Diff line
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
			       enum data_mode *data_mode)
{
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
	}
	}


	*data_mode = DATA_MODE_VMALLOC;
	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}
}


/*
/*
+2 −2
Original line number Original line Diff line number Diff line
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
				 struct dm_cache_statistics *stats)
{
{
	down_read(&cmd->root_lock);
	down_read(&cmd->root_lock);
	memcpy(stats, &cmd->stats, sizeof(*stats));
	*stats = cmd->stats;
	up_read(&cmd->root_lock);
	up_read(&cmd->root_lock);
}
}


@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
				 struct dm_cache_statistics *stats)
{
{
	down_write(&cmd->root_lock);
	down_write(&cmd->root_lock);
	memcpy(&cmd->stats, stats, sizeof(*stats));
	cmd->stats = *stats;
	up_write(&cmd->root_lock);
	up_write(&cmd->root_lock);
}
}


+2 −2
Original line number Original line Diff line number Diff line
@@ -130,8 +130,8 @@ struct dm_cache_policy {
	 *
	 *
	 * Must not block.
	 * Must not block.
	 *
	 *
	 * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
	 * would be typical).
	 * (-EWOULDBLOCK would be typical).
	 */
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);


+53 −47
Original line number Original line Diff line number Diff line
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
}
}


/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
static bool block_size_is_power_of_two(struct cache *cache)
{
{
	return cache->sectors_per_block_shift >= 0;
	return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)


	/*
	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it get's put on a bio list for processing by the
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 * worker thread.
	 */
	 */
	defer_writethrough_bio(pb->cache, bio);
	defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	policy_tick(cache->policy);
	wake_worker(cache);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}
}
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,


static struct kmem_cache *migration_cache;
static struct kmem_cache *migration_cache;


static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{
{
	int r = 0;
	int r = 0;


@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
	}
	}


	while (argc) {
	while (argc) {
		r = policy_set_config_value(p, argv[0], argv[1]);
		r = set_config_value(cache, argv[0], argv[1]);
		if (r) {
		if (r)
			DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
			break;
			       argv[0], argv[1]);
			return r;
		}


		argc -= 2;
		argc -= 2;
		argv += 2;
		argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
			       char **error)
{
{
	int r;

	cache->policy =	dm_cache_policy_create(ca->policy_name,
	cache->policy =	dm_cache_policy_create(ca->policy_name,
					       cache->cache_size,
					       cache->cache_size,
					       cache->origin_sectors,
					       cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
		return -ENOMEM;
		return -ENOMEM;
	}
	}


	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
	return 0;
	if (r) {
		*error = "Error setting cache policy's config values";
		dm_cache_policy_destroy(cache->policy);
		cache->policy = NULL;
	}

	return r;
}
}


/*
/*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
	return discard_block_size;
	return discard_block_size;
}
}


#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
#define DEFAULT_MIGRATION_THRESHOLD 2048


static int cache_create(struct cache_args *ca, struct cache **result)
static int cache_create(struct cache_args *ca, struct cache **result)
{
{
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	ti->discards_supported = true;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;
	ti->discard_zeroes_data_unsupported = true;


	memcpy(&cache->features, &ca->features, sizeof(cache->features));
	cache->features = ca->features;
	ti->per_bio_data_size = get_per_bio_data_size(cache);
	ti->per_bio_data_size = get_per_bio_data_size(cache);


	cache->callbacks.congested_fn = cache_is_congested;
	cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	r = create_cache_policy(cache, ca, error);
	r = create_cache_policy(cache, ca, error);
	if (r)
	if (r)
		goto bad;
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}


	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
	atomic_set(&cache->nr_migrations, 0);
	atomic_set(&cache->nr_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);
	init_waitqueue_head(&cache->migration_wait);


	r = -ENOMEM;
	cache->nr_dirty = 0;
	cache->nr_dirty = 0;
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
	if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ static void cache_status(struct dm_target *ti, status_type_t type,
	DMEMIT("Error");
	DMEMIT("Error");
}
}


#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, char **argv)
{
	unsigned long tmp;

	if (!strcasecmp(argv[0], "migration_threshold")) {
		if (kstrtoul(argv[1], 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

/*
/*
 * Supports <key> <value>.
 * Supports <key> <value>.
 *
 *
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
 */
 */
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
{
	int r;
	struct cache *cache = ti->private;
	struct cache *cache = ti->private;


	if (argc != 2)
	if (argc != 2)
		return -EINVAL;
		return -EINVAL;


	r = process_config_option(cache, argv);
	return set_config_value(cache, argv[0], argv[1]);
	if (r == NOT_CORE_OPTION)
		return policy_set_config_value(cache->policy, argv[0], argv[1]);

	return r;
}
}


static int cache_iterate_devices(struct dm_target *ti,
static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)


static struct target_type cache_target = {
static struct target_type cache_target = {
	.name = "cache",
	.name = "cache",
	.version = {1, 1, 0},
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.dtr = cache_dtr,
+1 −0
Original line number Original line Diff line number Diff line
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,


	ti->num_flush_bios = 1;
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;


	return 0;
	return 0;


Loading