
Commit 8ee294cd authored by Dmitry Torokhov

Input: serio - convert to common workqueue instead of a thread



Instead of creating an exclusive thread to handle serio events (which
happen rarely), let's switch to using the common workqueue. With the
arrival of the concurrency-managed workqueue infrastructure we no longer
need to worry about our callers or callees also using the workqueue
(no deadlocks anymore), and it should reduce the total number of
threads in the system.

Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
parent ce16a474
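
For readers skimming the diff below, the core of the change is the standard
kthread-to-workqueue conversion pattern. The following sketch (not part of the
commit; the demo_* names are hypothetical) shows the shape of it in miniature,
assuming only the kernel APIs the diff itself uses: DECLARE_WORK, schedule_work
and cancel_work_sync.

/*
 * Minimal sketch of the conversion pattern used by this commit
 * (hypothetical demo_* names, not serio code).
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_event_lock);
static LIST_HEAD(demo_event_list);

static void demo_handle_events(struct work_struct *work)
{
	/* drain demo_event_list here, as serio_handle_event() does */
}

static DECLARE_WORK(demo_event_work, demo_handle_events);

static void demo_queue_event(struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_event_lock, flags);
	list_add_tail(node, &demo_event_list);
	/* replaces wake_up() on a wait queue serviced by a private kthread */
	schedule_work(&demo_event_work);
	spin_unlock_irqrestore(&demo_event_lock, flags);
}

static void demo_cleanup(void)
{
	/* replaces kthread_stop(); waits for any queued handler to finish */
	cancel_work_sync(&demo_event_work);
}

Because the handler now runs on the shared concurrency-managed workqueue rather
than a dedicated "kseriod" thread, callers and callees that also use the
workqueue no longer risk deadlocking against it, which is the point the commit
message makes.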
+69 −86
@@ -32,10 +32,9 @@
 #include <linux/module.h>
 #include <linux/serio.h>
 #include <linux/errno.h>
-#include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/mutex.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
@@ -44,7 +43,7 @@ MODULE_LICENSE("GPL");
 
 /*
  * serio_mutex protects entire serio subsystem and is taken every time
- * serio port or driver registrered or unregistered.
+ * serio port or driver registered or unregistered.
  */
 static DEFINE_MUTEX(serio_mutex);
@@ -165,58 +164,22 @@ struct serio_event {
 
 static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
 static LIST_HEAD(serio_event_list);
-static DECLARE_WAIT_QUEUE_HEAD(serio_wait);
-static struct task_struct *serio_task;
 
-static int serio_queue_event(void *object, struct module *owner,
-			     enum serio_event_type event_type)
+static struct serio_event *serio_get_event(void)
 {
+	struct serio_event *event = NULL;
 	unsigned long flags;
-	struct serio_event *event;
-	int retval = 0;
 
 	spin_lock_irqsave(&serio_event_lock, flags);
 
-	/*
-	 * Scan event list for the other events for the same serio port,
-	 * starting with the most recent one. If event is the same we
-	 * do not need add new one. If event is of different type we
-	 * need to add this event and should not look further because
-	 * we need to preseve sequence of distinct events.
-	 */
-	list_for_each_entry_reverse(event, &serio_event_list, node) {
-		if (event->object == object) {
-			if (event->type == event_type)
-				goto out;
-			break;
-		}
-	}
-
-	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
-	if (!event) {
-		pr_err("Not enough memory to queue event %d\n", event_type);
-		retval = -ENOMEM;
-		goto out;
-	}
-
-	if (!try_module_get(owner)) {
-		pr_warning("Can't get module reference, dropping event %d\n",
-			   event_type);
-		kfree(event);
-		retval = -EINVAL;
-		goto out;
+	if (!list_empty(&serio_event_list)) {
+		event = list_first_entry(&serio_event_list,
+					 struct serio_event, node);
+		list_del_init(&event->node);
 	}
 
-	event->type = event_type;
-	event->object = object;
-	event->owner = owner;
-
-	list_add_tail(&event->node, &serio_event_list);
-	wake_up(&serio_wait);
-
-out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);
-	return retval;
+	return event;
 }
 
 static void serio_free_event(struct serio_event *event)
@@ -250,25 +213,7 @@ static void serio_remove_duplicate_events(struct serio_event *event)
 	spin_unlock_irqrestore(&serio_event_lock, flags);
 }
 
-
-static struct serio_event *serio_get_event(void)
-{
-	struct serio_event *event = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&serio_event_lock, flags);
-
-	if (!list_empty(&serio_event_list)) {
-		event = list_first_entry(&serio_event_list,
-					 struct serio_event, node);
-		list_del_init(&event->node);
-	}
-
-	spin_unlock_irqrestore(&serio_event_lock, flags);
-	return event;
-}
-
-static void serio_handle_event(void)
+static void serio_handle_event(struct work_struct *work)
 {
 	struct serio_event *event;
 
@@ -307,6 +252,59 @@ static void serio_handle_event(void)
 	mutex_unlock(&serio_mutex);
 }
 
+static DECLARE_WORK(serio_event_work, serio_handle_event);
+
+static int serio_queue_event(void *object, struct module *owner,
+			     enum serio_event_type event_type)
+{
+	unsigned long flags;
+	struct serio_event *event;
+	int retval = 0;
+
+	spin_lock_irqsave(&serio_event_lock, flags);
+
+	/*
+	 * Scan event list for the other events for the same serio port,
+	 * starting with the most recent one. If event is the same we
+	 * do not need add new one. If event is of different type we
+	 * need to add this event and should not look further because
+	 * we need to preseve sequence of distinct events.
+	 */
+	list_for_each_entry_reverse(event, &serio_event_list, node) {
+		if (event->object == object) {
+			if (event->type == event_type)
+				goto out;
+			break;
+		}
+	}
+
+	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
+	if (!event) {
+		pr_err("Not enough memory to queue event %d\n", event_type);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (!try_module_get(owner)) {
+		pr_warning("Can't get module reference, dropping event %d\n",
+			   event_type);
+		kfree(event);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	event->type = event_type;
+	event->object = object;
+	event->owner = owner;
+
+	list_add_tail(&event->node, &serio_event_list);
+	schedule_work(&serio_event_work);
+
+out:
+	spin_unlock_irqrestore(&serio_event_lock, flags);
+	return retval;
+}
+
 /*
  * Remove all events that have been submitted for a given
  * object, be it serio port or driver.
@@ -356,18 +354,6 @@ static struct serio *serio_get_pending_child(struct serio *parent)
 	return child;
 }
 
-static int serio_thread(void *nothing)
-{
-	do {
-		serio_handle_event();
-		wait_event_interruptible(serio_wait,
-			kthread_should_stop() || !list_empty(&serio_event_list));
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
-
 /*
  * Serio port operations
  */
@@ -1040,21 +1026,18 @@ static int __init serio_init(void)
 		return error;
 	}
 
-	serio_task = kthread_run(serio_thread, NULL, "kseriod");
-	if (IS_ERR(serio_task)) {
-		bus_unregister(&serio_bus);
-		error = PTR_ERR(serio_task);
-		pr_err("Failed to start kseriod, error: %d\n", error);
-		return error;
-	}
-
 	return 0;
 }
 
 static void __exit serio_exit(void)
 {
 	bus_unregister(&serio_bus);
-	kthread_stop(serio_task);
+
+	/*
+	 * There should not be any outstanding events but work may
+	 * still be scheduled so simply cancel it.
+	 */
+	cancel_work_sync(&serio_event_work);
 }
 
 subsys_initcall(serio_init);