
Commit ca85a66e authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "net: qrtr: Move rx worker to separate worker thread"

parents 7ba42f3c 31df2523
+22 −7
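The patch replaces the node's shared-workqueue work_struct with a dedicated kthread_worker, so inbound QRTR packets are processed on their own "qrtr_rx" kernel thread instead of the system workqueue. As a quick orientation before the diff, here is a minimal, self-contained sketch of that kthread_worker lifecycle. The demo_node structure and demo_* names are made up for illustration and are not part of this commit; only the kthread_* calls mirror the ones the patch uses.

/*
 * Illustrative sketch (not from this commit): the generic kthread_worker
 * pattern adopted by the patch below. demo_node and the demo_* names are
 * hypothetical.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

struct demo_node {
	struct kthread_worker kworker;	/* work queue serviced by one thread */
	struct task_struct *task;	/* the thread running the worker     */
	struct kthread_work read_data;	/* work item queued on each rx       */
};

static struct demo_node demo;

/* Runs on the dedicated "demo_rx" thread, not on the system workqueue. */
static void demo_rx_work(struct kthread_work *work)
{
	struct demo_node *node = container_of(work, struct demo_node,
					      read_data);

	pr_info("demo: processing rx for node %p\n", node);
}

static int __init demo_init(void)
{
	kthread_init_work(&demo.read_data, demo_rx_work);
	kthread_init_worker(&demo.kworker);

	/* Spawn the thread that drains the worker's work list. */
	demo.task = kthread_run(kthread_worker_fn, &demo.kworker, "demo_rx");
	if (IS_ERR(demo.task))
		return PTR_ERR(demo.task);

	/* Producer side: hand one work item to the worker thread. */
	kthread_queue_work(&demo.kworker, &demo.read_data);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Drain pending/running work, then stop the thread. */
	kthread_flush_worker(&demo.kworker);
	kthread_stop(demo.task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Compared with schedule_work(), this keeps all receive processing on a single dedicated, flushable thread, which can also be tuned (scheduling policy, priority) independently of the system workqueues.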
@@ -11,6 +11,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
 #include <linux/qrtr.h>
@@ -141,8 +142,10 @@ static DEFINE_MUTEX(qrtr_port_lock);
  * @resume_tx: wait until remote port acks control flag
  * @qrtr_tx_lock: lock for qrtr_tx_flow
  * @rx_queue: receive queue
- * @work: scheduled work struct for recv work
  * @item: list item for broadcast list
+ * @kworker: worker thread for recv work
+ * @task: task to run the worker thread
+ * @read_data: scheduled work for recv work
  * @ilc: ipc logging context reference
  */
 struct qrtr_node {
@@ -157,9 +160,12 @@ struct qrtr_node {
 	struct mutex qrtr_tx_lock;	/* for qrtr_tx_flow */
 
 	struct sk_buff_head rx_queue;
-	struct work_struct work;
 	struct list_head item;
 
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct kthread_work read_data;
+
 	void *ilc;
 };
 
@@ -332,8 +338,9 @@ static void __qrtr_node_release(struct kref *kref)
 	}
 	mutex_unlock(&node->qrtr_tx_lock);
 
+	kthread_flush_worker(&node->kworker);
+	kthread_stop(node->task);
 
-	flush_work(&node->work);
 	skb_queue_purge(&node->rx_queue);
 	kfree(node);
 }
@@ -673,7 +680,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	qrtr_log_rx_msg(node, skb);
 
 	skb_queue_tail(&node->rx_queue, skb);
-	schedule_work(&node->work);
+	kthread_queue_work(&node->kworker, &node->read_data);
 
 	return 0;
 
@@ -715,9 +722,10 @@ static void qrtr_port_put(struct qrtr_sock *ipc);
  *
  * This will auto-reply with resume-tx packet as necessary.
  */
-static void qrtr_node_rx_work(struct work_struct *work)
+static void qrtr_node_rx_work(struct kthread_work *work)
 {
-	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
+	struct qrtr_node *node = container_of(work, struct qrtr_node,
+					      read_data);
 	struct qrtr_ctrl_pkt *pkt;
 	struct sk_buff *skb;
 
@@ -770,7 +778,6 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
 	if (!node)
 		return -ENOMEM;
 
-	INIT_WORK(&node->work, qrtr_node_rx_work);
 	kref_init(&node->ref);
 	mutex_init(&node->ep_lock);
 	skb_queue_head_init(&node->rx_queue);
@@ -778,6 +785,14 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
 	node->ep = ep;
 	atomic_set(&node->hello_sent, 0);
 
+	kthread_init_work(&node->read_data, qrtr_node_rx_work);
+	kthread_init_worker(&node->kworker);
+	node->task = kthread_run(kthread_worker_fn, &node->kworker, "qrtr_rx");
+	if (IS_ERR(node->task)) {
+		kfree(node);
+		return -ENOMEM;
+	}
+
 	mutex_init(&node->qrtr_tx_lock);
 	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
 	init_waitqueue_head(&node->resume_tx);