Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 71f4d95b authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-4.20/dm-changes' of...

Merge tag 'for-4.20/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - The biggest change this cycle is to remove support for the legacy IO
   path (.request_fn) from request-based DM.

   Jens has already started preparing for complete removal of the legacy
   IO path in 4.21 but this earlier removal of support from DM has been
   coordinated with Jens (as evidenced by the commit being attributed to
   him).

   Making request-based DM exclusively blk-mq only cleans up that
   portion of DM core quite nicely.

 - Convert the thinp and zoned targets over to using refcount_t where
   applicable.

 - A couple fixes to the DM zoned target for refcounting and other races
   buried in the implementation of metadata block creation and use.

 - Small cleanups to remove redundant unlikely() around a couple
   WARN_ON_ONCE().

 - Simplify how dm-ioctl copies from userspace, eliminating some
   potential for a malicious user trying to change the executed ioctl
   after its processing has begun.

 - Tweaked DM crypt target to use the DM device name when naming the
   various workqueues created for a particular DM crypt device (makes
   the N workqueues for a DM crypt device more easily understood and
   enhances user's accounting capabilities at a glance via "ps").

 - Small fixup to remove dead branch in DM writecache's memory_entry().

* tag 'for-4.20/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm writecache: remove disabled code in memory_entry()
  dm zoned: fix various dmz_get_mblock() issues
  dm zoned: fix metadata block ref counting
  dm raid: avoid bitmap with raid4/5/6 journal device
  dm crypt: make workqueue names device-specific
  dm: add dm_table_device_name()
  dm ioctl: harden copy_params()'s copy_from_user() from malicious users
  dm: remove unnecessary unlikely() around WARN_ON_ONCE()
  dm zoned: target: use refcount_t for dm zoned reference counters
  dm thin: use refcount_t for thin_c reference counting
  dm table: require that request-based DM be layered on blk-mq devices
  dm: rename DM_TYPE_MQ_REQUEST_BASED to DM_TYPE_REQUEST_BASED
  dm: remove legacy request-based IO path
parents 6080ad3a da4ad3a2
Loading
Loading
Loading
Loading
+0 −11
Original line number Diff line number Diff line
@@ -215,17 +215,6 @@ config BLK_DEV_DM

	  If unsure, say N.

config DM_MQ_DEFAULT
	bool "request-based DM: use blk-mq I/O path by default"
	depends on BLK_DEV_DM
	---help---
	  This option enables the blk-mq based I/O path for request-based
	  DM devices by default.  With the option the dm_mod.use_blk_mq
	  module/boot option defaults to Y, without it to N, but it can
	  still be overriden either way.

	  If unsure say N.

config DM_DEBUG
	bool "Device mapper debugging support"
	depends on BLK_DEV_DM
+1 −1
Original line number Diff line number Diff line
@@ -1200,7 +1200,7 @@ static void queue_demotion(struct smq_policy *mq)
	struct policy_work work;
	struct entry *e;

	if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
	if (WARN_ON_ONCE(!mq->migrations_allowed))
		return;

	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
+0 −10
Original line number Diff line number Diff line
@@ -112,18 +112,8 @@ struct mapped_device {

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
+10 −5
Original line number Diff line number Diff line
@@ -2661,6 +2661,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
@@ -2806,18 +2807,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	cc->io_queue = alloc_workqueue("kcryptd_io/%s",
				       WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
				       1, devname);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
						  1, devname);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd",
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
						  num_online_cpus(), devname);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
@@ -2826,7 +2831,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
+6 −12
Original line number Diff line number Diff line
@@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
}

static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
		       int ioctl_flags,
		       struct dm_ioctl **param, int *param_flags)
		       int ioctl_flags, struct dm_ioctl **param, int *param_flags)
{
	struct dm_ioctl *dmi;
	int secure_data;
@@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern

	*param_flags |= DM_PARAMS_MALLOC;

	if (copy_from_user(dmi, user, param_kernel->data_size))
		goto bad;
	/* Copy from param_kernel (which was already copied from user) */
	memcpy(dmi, param_kernel, minimum_data_size);

data_copied:
	/*
	 * Abort if something changed the ioctl data while it was being copied.
	 */
	if (dmi->data_size != param_kernel->data_size) {
		DMERR("rejecting ioctl: data size modified while processing parameters");
	if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
			   param_kernel->data_size - minimum_data_size))
		goto bad;
	}

data_copied:
	/* Wipe the user buffer so we do not return it to userspace */
	if (secure_data && clear_user(user, param_kernel->data_size))
		goto bad;
Loading