
Commit 86644f09 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "msm: gsi: Avoid ring event ring door bell for empty ieob"

parents b29078c9 5e93a08a
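
In short, the first diff below keeps gsi_handle_ieob() from ringing the event ring doorbell when no event was actually processed, and makes gsi_poll_n_channel() re-read the event ring pointer before declaring the ring empty. A minimal user-space sketch of the doorbell-gating idea, using an illustrative ring struct and process_event()/ring_doorbell() stubs that are not the driver's real definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the GSI event ring state and hardware ops. */
struct evt_ring {
	uint64_t rp;		/* where the hardware stopped writing events */
	uint64_t rp_local;	/* what software has consumed so far         */
};

static void process_event(struct evt_ring *ring)
{
	ring->rp_local++;	/* consume one completed event (stub) */
}

static void ring_doorbell(struct evt_ring *ring)
{
	printf("doorbell at rp_local=%llu\n", (unsigned long long)ring->rp_local);
}

/* Pattern added by the patch: ring the doorbell only if at least one
 * event was processed; an IEOB with an already-empty ring is a no-op. */
static void handle_ieob(struct evt_ring *ring)
{
	bool empty = true;

	while (ring->rp_local != ring->rp) {
		process_event(ring);
		empty = false;
	}
	if (!empty)
		ring_doorbell(ring);
}

int main(void)
{
	struct evt_ring ring = { .rp = 3, .rp_local = 0 };

	handle_ieob(&ring);	/* processes 3 events, rings the doorbell once */
	handle_ieob(&ring);	/* ring already empty, no doorbell             */
	return 0;
}
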
File 1: +26 −10
@@ -622,6 +622,7 @@ static void gsi_handle_ieob(int ee)
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;
	bool empty;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
@@ -653,6 +654,7 @@ static void gsi_handle_ieob(int ee)
			spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
			cntr = 0;
			empty = true;
			rp = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
@@ -666,7 +668,9 @@ static void gsi_handle_ieob(int ee)
					break;
				}
				gsi_process_evt_re(ctx, &notify, true);
				empty = false;
			}
			if (!empty)
				gsi_ring_evt_doorbell(ctx);
			if (cntr != 0)
				goto check_again;
@@ -3597,20 +3601,32 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see of we have anything new to process */
		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;

		ctx->evtr->ring.rp = rp;
	}

	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
		/* read gsi event ring rp again if last read is empty */
		if (rp == ctx->evtr->ring.rp_local) {
			/* event ring is empty */
			gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
			/* do another read to close a small window */
			__iowmb();
			rp = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(
				ctx->evtr->id, ee));
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
			ctx->evtr->ring.rp = rp;
			if (rp == ctx->evtr->ring.rp_local) {
				spin_unlock_irqrestore(
					&ctx->evtr->ring.slock,
					flags);
				ctx->stats.poll_empty++;
				return GSI_STATUS_POLL_EMPTY;
			}
		}
	}

	*actual_num = gsi_get_complete_num(&ctx->evtr->ring,
			ctx->evtr->ring.rp_local, ctx->evtr->ring.rp);
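
The gsi_poll_n_channel() hunk above also reorders the empty check: the cached read pointer is refreshed first, and only if it still equals rp_local does the code clear the IEOB interrupt, issue a write barrier (__iowmb()) and read the hardware pointer one more time before returning GSI_STATUS_POLL_EMPTY. A rough sketch of that double-check, where read_hw_rp(), clear_ieob_irq() and the barrier are illustrative stubs rather than driver APIs:

#include <stdbool.h>
#include <stdint.h>

static uint64_t hw_rp;				/* value the hardware would report */
static uint64_t read_hw_rp(void) { return hw_rp; }
static void clear_ieob_irq(void) { /* ack the IEOB interrupt (stub) */ }
static void write_barrier(void) { __sync_synchronize(); /* stands in for __iowmb() */ }

/* Returns false only when the ring is confirmed empty. Mirrors the patched
 * flow: refresh rp, and on an apparent miss clear the interrupt, barrier,
 * and re-read once more to close the small race window. */
static bool poll_has_work(uint64_t *cached_rp, uint64_t rp_local)
{
	if (*cached_rp == rp_local) {
		*cached_rp = read_hw_rp();
		if (*cached_rp == rp_local) {
			clear_ieob_irq();
			write_barrier();
			*cached_rp = read_hw_rp();
			if (*cached_rp == rp_local)
				return false;	/* truly empty */
		}
	}
	return true;
}
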
File 2: +22 −17
@@ -191,7 +191,7 @@ static void ipa3_wq_write_done_status(int src_pipe,
/**
 * ipa_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work:	work_struct used by the work queue
 * @data: user pointer point to the ipa3_sys_context
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
@@ -200,26 +200,27 @@ static void ipa3_wq_write_done_status(int src_pipe,
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 */
static void ipa3_wq_write_done(struct work_struct *work)
static void ipa3_tasklet_write_done(unsigned long data)
{
	struct ipa3_tx_pkt_wrapper *tx_pkt;
	struct ipa3_sys_context *sys;
	struct ipa3_tx_pkt_wrapper *this_pkt;
	bool xmit_done = false;

	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
	sys = tx_pkt->sys;
	sys = (struct ipa3_sys_context *)data;
	spin_lock_bh(&sys->spinlock);
	while (atomic_add_unless(&sys->xmit_eot_cnt, -1, 0)) {
		while (!list_empty(&sys->head_desc_list)) {
			this_pkt = list_first_entry(&sys->head_desc_list,
				struct ipa3_tx_pkt_wrapper, link);
	while (tx_pkt != this_pkt) {
			xmit_done = this_pkt->xmit_done;
			spin_unlock_bh(&sys->spinlock);
			ipa3_wq_write_done_common(sys, this_pkt);
			spin_lock_bh(&sys->spinlock);
		this_pkt = list_first_entry(&sys->head_desc_list,
			struct ipa3_tx_pkt_wrapper, link);
			if (xmit_done)
				break;
		}
	}
	spin_unlock_bh(&sys->spinlock);
	ipa3_wq_write_done_common(sys, tx_pkt);
}


@@ -239,7 +240,6 @@ static void ipa3_send_nop_desc(struct work_struct *work)

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->cnt = 1;
	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
	tx_pkt->no_unmap_dma = true;
	tx_pkt->sys = sys;
	spin_lock_bh(&sys->spinlock);
@@ -350,7 +350,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
		if (i == 0) {
			tx_pkt_first = tx_pkt;
			tx_pkt->cnt = num_desc;
			INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
		}

		/* populate tag field */
@@ -406,6 +405,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;
		tx_pkt->xmit_done = false;

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

@@ -1026,6 +1026,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
	}

	atomic_set(&ep->sys->xmit_eot_cnt, 0);
	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
			(unsigned long) ep->sys);
	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
	if (ipa3_assign_policy(sys_in, ep->sys)) {
		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
@@ -3878,7 +3881,9 @@ static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
	case GSI_CHAN_EVT_EOT:
		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
		tx_pkt = notify->xfer_user_data;
		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
		tx_pkt->xmit_done = true;
		atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
		tasklet_schedule(&tx_pkt->sys->tasklet);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->evt_id);
File 3: +7 −3
@@ -15,7 +15,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/notifier.h>

#include <linux/interrupt.h>
#include <linux/ipa.h>
#include <linux/ipa_usb.h>
#include <asm/dma-iommu.h>
@@ -864,6 +864,8 @@ struct ipa3_repl_ctx {
 * @len: the size of the above list
 * @spinlock: protects the list and its size
 * @ep: IPA EP context
 * @xmit_eot_cnt: count of pending eot for tasklet to process
 * @tasklet: tasklet for eot write_done handle (tx_complete)
 *
 * IPA context specific to the GPI pipes a.k.a LAN IN/OUT and WAN
 */
@@ -893,6 +895,8 @@ struct ipa3_sys_context {
	u32 pkt_sent;
	struct napi_struct *napi_obj;
	struct list_head pending_pkts[GSI_VEID_MAX];
	atomic_t xmit_eot_cnt;
	struct tasklet_struct tasklet;

	/* ordering is important - mutable fields go above */
	struct ipa3_ep_context *ep;
@@ -923,7 +927,6 @@ enum ipa3_desc_type {
 * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper
 * @type: specify if this packet is for the skb or immediate command
 * @mem: memory buffer used by this Tx packet
 * @work: work struct for current Tx packet
 * @link: linked to the wrappers on that pipe
 * @callback: IPA client provided callback
 * @user1: cookie1 for above callback
@@ -934,13 +937,13 @@ enum ipa3_desc_type {
 * 0xFFFF for last desc, 0 for rest of "multiple' transfer
 * @bounce: va of bounce buffer
 * @unmap_dma: in case this is true, the buffer will not be dma unmapped
 * @xmit_done: flag to indicate the last desc got tx complete on each ieob
 *
 * This struct can wrap both data packet and immediate command packet.
 */
struct ipa3_tx_pkt_wrapper {
	enum ipa3_desc_type type;
	struct ipa_mem_buffer mem;
	struct work_struct work;
	struct list_head link;
	void (*callback)(void *user1, int user2);
	void *user1;
@@ -949,6 +952,7 @@ struct ipa3_tx_pkt_wrapper {
	u32 cnt;
	void *bounce;
	bool no_unmap_dma;
	bool xmit_done;
};

/**