Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 52f5c480 authored by Bojun Pan, committed by Gerrit - the friendly Code Review server
Browse files

msm: ipa: Use tasklet instead of WQ for write done



ETM profiling shows that a great amount of time is spent in
the IEOB IRQ doing queue_work for write done. Change it
to use the tasklet mechanism instead.

Change-Id: I2c0abf9e55bc6d80f2e43de472121247f00ba7e0
Signed-off-by: Bojun Pan <bojunp@codeaurora.org>
parent adc7dca4
Loading
Loading
Loading
Loading
+22 −17
Original line number Diff line number Diff line
@@ -191,7 +191,7 @@ static void ipa3_wq_write_done_status(int src_pipe,
/**
 * ipa_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * * @work:	work_struct used by the work queue
 * @data: user pointer point to the ipa3_sys_context
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
@@ -200,26 +200,27 @@ static void ipa3_wq_write_done_status(int src_pipe,
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 */
/*
 * NOTE(review): this span is a unified diff with the +/- markers stripped,
 * so removed (old workqueue-based) lines and added (new tasklet-based)
 * lines are interleaved below.  The old entry point ipa3_wq_write_done()
 * took a work_struct; the new ipa3_tasklet_write_done() receives the
 * ipa3_sys_context pointer cast to unsigned long (see the tasklet_init()
 * call in ipa3_setup_sys_pipe elsewhere in this commit).
 */
static void ipa3_wq_write_done(struct work_struct *work)	/* removed: old WQ signature */
static void ipa3_tasklet_write_done(unsigned long data)	/* added: tasklet signature */
{
	struct ipa3_tx_pkt_wrapper *tx_pkt;	/* presumably removed — unused in the new path; verify against full diff */
	struct ipa3_sys_context *sys;
	struct ipa3_tx_pkt_wrapper *this_pkt;	/* current head-of-list Tx packet */
	bool xmit_done = false;	/* true once the last desc of an IEOB batch is seen */

	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);	/* removed: no work_struct in new signature */
	sys = tx_pkt->sys;	/* removed */
	sys = (struct ipa3_sys_context *)data;	/* added: sys ctx passed via tasklet_init() */
	spin_lock_bh(&sys->spinlock);
	/* added: consume one pending-EOT credit per outer iteration; the
	 * counter is bumped in the GSI EOT IRQ notify callback. */
	while (atomic_add_unless(&sys->xmit_eot_cnt, -1, 0)) {
		while (!list_empty(&sys->head_desc_list)) {
			this_pkt = list_first_entry(&sys->head_desc_list,
				struct ipa3_tx_pkt_wrapper, link);
	while (tx_pkt != this_pkt) {	/* presumably removed: old loop bound on tx_pkt */
			xmit_done = this_pkt->xmit_done;
			/* drop the lock across the common completion path,
			 * which invokes client callbacks */
			spin_unlock_bh(&sys->spinlock);
			ipa3_wq_write_done_common(sys, this_pkt);
			spin_lock_bh(&sys->spinlock);
		this_pkt = list_first_entry(&sys->head_desc_list,	/* presumably removed: old re-read of head */
			struct ipa3_tx_pkt_wrapper, link);
			if (xmit_done)
				break;	/* added: stop at the desc that carried this EOT */
		}
	}
	spin_unlock_bh(&sys->spinlock);
	ipa3_wq_write_done_common(sys, tx_pkt);	/* removed: old unconditional final completion */
}


@@ -239,7 +240,6 @@ static void ipa3_send_nop_desc(struct work_struct *work)

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->cnt = 1;
	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
	tx_pkt->no_unmap_dma = true;
	tx_pkt->sys = sys;
	spin_lock_bh(&sys->spinlock);
@@ -350,7 +350,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
		if (i == 0) {
			tx_pkt_first = tx_pkt;
			tx_pkt->cnt = num_desc;
			INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
		}

		/* populate tag field */
@@ -406,6 +405,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;
		tx_pkt->xmit_done = false;

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

@@ -1026,6 +1026,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
	}

	atomic_set(&ep->sys->xmit_eot_cnt, 0);
	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
			(unsigned long) ep->sys);
	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa3_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
@@ -3880,7 +3883,9 @@ static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
	case GSI_CHAN_EVT_EOT:
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		tx_pkt = notify->xfer_user_data;
		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
		tx_pkt->xmit_done = true;
		atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
		tasklet_schedule(&tx_pkt->sys->tasklet);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
+7 −3
Original line number Diff line number Diff line
@@ -15,7 +15,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/notifier.h>

#include <linux/interrupt.h>
#include <linux/ipa.h>
#include <linux/ipa_usb.h>
#include <asm/dma-iommu.h>
@@ -864,6 +864,8 @@ struct ipa3_repl_ctx {
 * @len: the size of the above list
 * @spinlock: protects the list and its size
 * @ep: IPA EP context
 * @xmit_eot_cnt: count of pending eot for tasklet to process
 * @tasklet: tasklet for eot write_done handle (tx_complete)
 *
 * IPA context specific to the GPI pipes a.k.a LAN IN/OUT and WAN
 */
@@ -893,6 +895,8 @@ struct ipa3_sys_context {
	u32 pkt_sent;
	struct napi_struct *napi_obj;
	struct list_head pending_pkts[GSI_VEID_MAX];
	atomic_t xmit_eot_cnt;
	struct tasklet_struct tasklet;

	/* ordering is important - mutable fields go above */
	struct ipa3_ep_context *ep;
@@ -923,7 +927,6 @@ enum ipa3_desc_type {
 * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper
 * @type: specify if this packet is for the skb or immediate command
 * @mem: memory buffer used by this Tx packet
 * @work: work struct for current Tx packet
 * @link: linked to the wrappers on that pipe
 * @callback: IPA client provided callback
 * @user1: cookie1 for above callback
@@ -934,13 +937,13 @@ enum ipa3_desc_type {
 * 0xFFFF for last desc, 0 for rest of "multiple' transfer
 * @bounce: va of bounce buffer
 * @unmap_dma: in case this is true, the buffer will not be dma unmapped
 * @xmit_done: flag to indicate the last desc got tx complete on each ieob
 *
 * This struct can wrap both data packet and immediate command packet.
 */
struct ipa3_tx_pkt_wrapper {
	enum ipa3_desc_type type;
	struct ipa_mem_buffer mem;
	struct work_struct work;
	struct list_head link;
	void (*callback)(void *user1, int user2);
	void *user1;
@@ -949,6 +952,7 @@ struct ipa3_tx_pkt_wrapper {
	u32 cnt;
	void *bounce;
	bool no_unmap_dma;
	bool xmit_done;
};

/**