Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3989144f authored by Petr Mladek's avatar Petr Mladek Committed by Linus Torvalds
Browse files

kthread: kthread worker API cleanup

A good practice is to prefix the names of functions by the name
of the subsystem.

The kthread worker API is a mix of classic kthreads and workqueues.  Each
worker has a dedicated kthread.  It runs a generic function that processes
queued works.  It is implemented as part of the kthread subsystem.

This patch renames the existing kthread worker API to use
the corresponding name from the workqueues API prefixed by
kthread_:

__init_kthread_worker()		-> __kthread_init_worker()
init_kthread_worker()		-> kthread_init_worker()
init_kthread_work()		-> kthread_init_work()
insert_kthread_work()		-> kthread_insert_work()
queue_kthread_work()		-> kthread_queue_work()
flush_kthread_work()		-> kthread_flush_work()
flush_kthread_worker()		-> kthread_flush_worker()

Note that the names of DEFINE_KTHREAD_WORK*() macros stay
as they are. It is common that the "DEFINE_" prefix has
precedence over the subsystem names.

Note that INIT() macros and init() functions use different
naming scheme. There is no good solution. There are several
reasons for this solution:

  + "init" in the function names stands for the verb "initialize"
    aka "initialize worker". While "INIT" in the macro names
    stands for the noun "INITIALIZER" aka "worker initializer".

  + INIT() macros are used only in DEFINE() macros

  + init() functions are used close to the other kthread()
    functions. It looks much better if all the functions
    use the same scheme.

  + There will be also kthread_destroy_worker() that will
    be used close to kthread_cancel_work(). It is related
    to the init() function. Again it looks better if all
    functions use the same naming scheme.

  + There are several precedents for such init() function
    names, e.g. amd_iommu_init_device(), free_area_init_node(),
    jump_label_init_type(), regmap_init_mmio_clk().

  + It is not an argument but it was inconsistent even before.

[arnd@arndb.de: fix linux-next merge conflict]
 Link: http://lkml.kernel.org/r/20160908135724.1311726-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/1470754545-17632-3-git-send-email-pmladek@suse.com


Suggested-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarPetr Mladek <pmladek@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: default avatarArnd Bergmann <arnd@arndb.de>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent e700591a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -57,7 +57,7 @@ Call Trace:
 [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
 [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
 [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
 [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
 [<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
 [<ffffffff817db150>] ? gs_change+0xb/0xb

Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
+7 −7
Original line number Diff line number Diff line
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
	 */
	smp_mb();
	if (atomic_dec_if_positive(&ps->pending) > 0)
		queue_kthread_work(&pit->worker, &pit->expired);
		kthread_queue_work(&pit->worker, &pit->expired);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -233,7 +233,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
static void destroy_pit_timer(struct kvm_pit *pit)
{
	hrtimer_cancel(&pit->pit_state.timer);
	flush_kthread_work(&pit->expired);
	kthread_flush_work(&pit->expired);
}

static void pit_do_work(struct kthread_work *work)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
	if (atomic_read(&ps->reinject))
		atomic_inc(&ps->pending);

	queue_kthread_work(&pt->worker, &pt->expired);
	kthread_queue_work(&pt->worker, &pt->expired);

	if (ps->is_periodic) {
		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -324,7 +324,7 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)

	/* TODO The new value only affected after the retriggered */
	hrtimer_cancel(&ps->timer);
	flush_kthread_work(&pit->expired);
	kthread_flush_work(&pit->expired);
	ps->period = interval;
	ps->is_periodic = is_period;

@@ -667,13 +667,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
	pid_nr = pid_vnr(pid);
	put_pid(pid);

	init_kthread_worker(&pit->worker);
	kthread_init_worker(&pit->worker);
	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
				       "kvm-pit/%d", pid_nr);
	if (IS_ERR(pit->worker_task))
		goto fail_kthread;

	init_kthread_work(&pit->expired, pit_do_work);
	kthread_init_work(&pit->expired, pit_do_work);

	pit->kvm = kvm;

@@ -730,7 +730,7 @@ void kvm_free_pit(struct kvm *kvm)
		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
		kvm_pit_set_reinject(pit, false);
		hrtimer_cancel(&pit->pit_state.timer);
		flush_kthread_work(&pit->expired);
		kthread_flush_work(&pit->expired);
		kthread_stop(pit->worker_task);
		kvm_free_irq_source_id(kvm, pit->irq_source_id);
		kfree(pit);
+10 −10
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,

	/* If another context is idling then defer */
	if (engine->idling) {
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		kthread_queue_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&engine->kworker,
			kthread_queue_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,

	req->base.complete(&req->base, err);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);

@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,

	req->base.complete(&req->base, err);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
	kthread_queue_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
@@ -422,7 +422,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	init_kthread_worker(&engine->kworker);
	kthread_init_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
@@ -430,7 +430,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	init_kthread_work(&engine->pump_requests, crypto_pump_work);
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
@@ -455,7 +455,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
	if (ret)
		return ret;

	flush_kthread_worker(&engine->kworker);
	kthread_flush_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
+4 −4
Original line number Diff line number Diff line
@@ -840,13 +840,13 @@ static void loop_config_discard(struct loop_device *lo)

static void loop_unprepare_queue(struct loop_device *lo)
{
	flush_kthread_worker(&lo->worker);
	kthread_flush_worker(&lo->worker);
	kthread_stop(lo->worker_task);
}

static int loop_prepare_queue(struct loop_device *lo)
{
	init_kthread_worker(&lo->worker);
	kthread_init_worker(&lo->worker);
	lo->worker_task = kthread_run(kthread_worker_fn,
			&lo->worker, "loop%d", lo->lo_number);
	if (IS_ERR(lo->worker_task))
@@ -1658,7 +1658,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		break;
	}

	queue_kthread_work(&lo->worker, &cmd->work);
	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_MQ_RQ_QUEUE_OK;
}
@@ -1696,7 +1696,7 @@ static int loop_init_request(void *data, struct request *rq,
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	init_kthread_work(&cmd->work, loop_queue_work);
	kthread_init_work(&cmd->work, loop_queue_work);

	return 0;
}
+5 −5
Original line number Diff line number Diff line
@@ -129,7 +129,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
		if (likely(worker)) {
			cq->notify = RVT_CQ_NONE;
			cq->triggered++;
			queue_kthread_work(worker, &cq->comptask);
			kthread_queue_work(worker, &cq->comptask);
		}
	}

@@ -265,7 +265,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	init_kthread_work(&cq->comptask, send_complete);
	kthread_init_work(&cq->comptask, send_complete);
	cq->queue = wc;

	ret = &cq->ibcq;
@@ -295,7 +295,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_kthread_work(&cq->comptask);
	kthread_flush_work(&cq->comptask);
	spin_lock(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock(&rdi->n_cqs_lock);
@@ -514,7 +514,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
	rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
	if (!rdi->worker)
		return -ENOMEM;
	init_kthread_worker(rdi->worker);
	kthread_init_worker(rdi->worker);
	task = kthread_create_on_node(
		kthread_worker_fn,
		rdi->worker,
@@ -547,7 +547,7 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
	/* blocks future queuing from send_complete() */
	rdi->worker = NULL;
	smp_wmb(); /* See rdi_cq_enter */
	flush_kthread_worker(worker);
	kthread_flush_worker(worker);
	kthread_stop(worker->task);
	kfree(worker);
}
Loading