Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 84da4162 authored by Amir Levy
Browse files

msm: ipa: added likely/unlikely branch prediction to the lan dp



Added branch prediction in an effort to make
the data path more efficient.

Acked-by: Tal Gelbard <tgelbard@qti.qualcomm.com>
Change-Id: I3bd2157ee6c263d89de9425c7a0249370ab918fc
Signed-off-by: Amir Levy <alevy@codeaurora.org>
parent 7de7fee9
Loading
Loading
Loading
Loading
+41 −39
Original line number Diff line number Diff line
@@ -486,12 +486,12 @@ static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
	GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
		ctx->base, ctx->end);

	if (addr1 < ctx->base || addr1 >= ctx->end) {
	if (unlikely(addr1 < ctx->base || addr1 >= ctx->end)) {
		GSIERR("address = 0x%llx not in range\n", addr1);
		GSI_ASSERT();
	}

	if (addr2 < ctx->base || addr2 >= ctx->end) {
	if (unlikely(addr2 < ctx->base || addr2 >= ctx->end)) {
		GSIERR("address = 0x%llx not in range\n", addr2);
		GSI_ASSERT();
	}
@@ -512,14 +512,14 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
	uint64_t rp;

	ch_id = evt->chid;
	if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
	if (unlikely(WARN_ON(ch_id >= gsi_ctx->max_ch))) {
		GSIERR("Unexpected ch %d\n", ch_id);
		return;
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
	if (unlikely(WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ch_ctx->props.prot != GSI_CHAN_PROT_GCI)))
		return;

	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
@@ -555,7 +555,7 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
	notify->bytes_xfered = evt->len;

	if (callback) {
		if (atomic_read(&ch_ctx->poll_mode)) {
		if (unlikely(atomic_read(&ch_ctx->poll_mode))) {
			GSIERR("Calling client callback in polling mode\n");
			WARN_ON(1);
		}
@@ -633,7 +633,8 @@ static void gsi_handle_ieob(int ee)

	for (i = 0; i < GSI_STTS_REG_BITS; i++) {
		if ((1 << i) & ch & msk) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
			if (unlikely(i >= gsi_ctx->max_ev
				|| i >= GSI_EVT_RING_MAX)) {
				GSIERR("invalid event %d\n", i);
				break;
			}
@@ -646,7 +647,8 @@ static void gsi_handle_ieob(int ee)
			if (ctx->props.intr == GSI_INTR_MSI)
				continue;

			if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
			if (unlikely(ctx->props.intf !=
				GSI_EVT_CHTYPE_GPI_EV)) {
				GSIERR("Unexpected irq intf %d\n",
					ctx->props.intf);
				GSI_ASSERT();
@@ -780,7 +782,7 @@ static void gsi_handle_irq(void)
		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
			gsi_handle_general(ee);

		if (++cnt > GSI_ISR_MAX_ITER) {
		if (unlikely(++cnt > GSI_ISR_MAX_ITER)) {
			/*
			 * Max number of spurious interrupts from hardware.
			 * Unexpected hardware state.
@@ -942,17 +944,17 @@ int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	unsigned long flags;

	if (!gsi_ctx) {
	if (unlikely(!gsi_ctx)) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
	if (unlikely(!gsi_ctx->per_registered)) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
	if (unlikely(dev_hdl != (uintptr_t)gsi_ctx)) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
				gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
@@ -1871,19 +1873,19 @@ int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
	if (unlikely(!gsi_ctx)) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
	if (unlikely(evt_ring_hdl >= gsi_ctx->max_ev)) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
	if (unlikely(ctx->state != GSI_EVT_RING_STATE_ALLOCATED)) {
		GSIERR("bad state %d\n",
				gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
@@ -1901,19 +1903,19 @@ int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
	struct gsi_chan_ctx *ctx;
	uint32_t val;

	if (!gsi_ctx) {
	if (unlikely(!gsi_ctx)) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
	if (unlikely(chan_hdl >= gsi_ctx->max_ch)) {
		GSIERR("bad chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_STARTED) {
	if (unlikely(ctx->state != GSI_CHAN_STATE_STARTED)) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
@@ -3397,13 +3399,13 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
	uint16_t idx;

	memset(&gci_tre, 0, sizeof(gci_tre));
	if (xfer->addr & 0xFFFFFF0000000000) {
	if (unlikely(xfer->addr & 0xFFFFFF0000000000)) {
		GSIERR("chan_hdl=%u add too large=%llx\n",
			ctx->props.ch_id, xfer->addr);
		return -EINVAL;
	}

	if (xfer->type != GSI_XFER_ELEM_DATA) {
	if (unlikely(xfer->type != GSI_XFER_ELEM_DATA)) {
		GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
			xfer->type);
		return -EINVAL;
@@ -3417,7 +3419,7 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
	if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
	if (unlikely(gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX)))
		return -EPERM;

	/* write the TRE to ring */
@@ -3476,12 +3478,12 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
	if (unlikely(!gsi_ctx)) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
	if (unlikely(chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer))) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
				chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
@@ -3489,8 +3491,8 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
			ctx->props.prot != GSI_CHAN_PROT_GCI) {
	if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
			ctx->props.prot != GSI_CHAN_PROT_GCI)) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
@@ -3512,7 +3514,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
	 */
	if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
		__gsi_query_channel_free_re(ctx, &free);
		if (num_xfers > free) {
		if (unlikely(num_xfers > free)) {
			GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
				chan_hdl, num_xfers, free);
			spin_unlock_irqrestore(slock, flags);
@@ -3532,7 +3534,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
	if (unlikely(i != num_xfers)) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
@@ -3609,13 +3611,13 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
	int i;
	unsigned long flags;

	if (!gsi_ctx) {
	if (unlikely(!gsi_ctx)) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !notify ||
	    !actual_num || expected_num <= 0) {
	if (unlikely(chan_hdl >= gsi_ctx->max_ch || !notify ||
	    !actual_num || expected_num <= 0)) {
		GSIERR("bad params chan_hdl=%lu notify=%pK\n",
			chan_hdl, notify);
		GSIERR("actual_num=%pK expected_num=%d\n",
@@ -3626,13 +3628,13 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
	if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI)) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr) {
	if (unlikely(!ctx->evtr)) {
		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
@@ -3690,25 +3692,25 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
	unsigned long flags;
	enum gsi_chan_mode chan_mode;

	if (!gsi_ctx) {
	if (unlikely(!gsi_ctx)) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
	if (unlikely(chan_hdl >= gsi_ctx->max_ch)) {
		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI) {
	if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
		ctx->props.prot != GSI_CHAN_PROT_GCI)) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
	if (unlikely(!ctx->evtr || !ctx->evtr->props.exclusive)) {
		GSIERR("cannot configure mode on chan_hdl=%lu\n",
				chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
@@ -3719,7 +3721,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
	else
		curr = GSI_CHAN_MODE_CALLBACK;

	if (mode == curr) {
	if (unlikely(mode == curr)) {
		GSIERR("already in requested mode %u chan_hdl=%lu\n",
				curr, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
+8 −8
Original line number Diff line number Diff line
@@ -584,7 +584,7 @@ static netdev_tx_t ecm_ipa_start_xmit
	}

	ret = ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
	if (ret) {
	if (unlikely(ret)) {
		ECM_IPA_DEBUG("Failed to activate PM client\n");
		netif_stop_queue(net);
		goto fail_pm_activate;
@@ -607,7 +607,7 @@ static netdev_tx_t ecm_ipa_start_xmit
				, skb->protocol);

	ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
	if (ret) {
	if (unlikely(ret)) {
		ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
		goto fail_tx_packet;
	}
@@ -642,7 +642,7 @@ static void ecm_ipa_packet_receive_notify
	int result;
	unsigned int packet_len;

	if (!skb) {
	if (unlikely(!skb)) {
		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
		return;
	}
@@ -655,7 +655,7 @@ static void ecm_ipa_packet_receive_notify
		return;
	}

	if (evt != IPA_RECEIVE)	{
	if (unlikely(evt != IPA_RECEIVE))	{
		ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n");
		return;
	}
@@ -664,7 +664,7 @@ static void ecm_ipa_packet_receive_notify
	skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);

	result = netif_rx(skb);
	if (result)
	if (unlikely(result))
		ECM_IPA_ERROR("fail on netif_rx\n");
	ecm_ipa_ctx->net->stats.rx_packets++;
	ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
@@ -1129,12 +1129,12 @@ static void ecm_ipa_tx_complete_notify
	struct sk_buff *skb = (struct sk_buff *)data;
	struct ecm_ipa_dev *ecm_ipa_ctx = priv;

	if (!skb) {
	if (unlikely(!skb)) {
		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
		return;
	}

	if (!ecm_ipa_ctx) {
	if (unlikely(!ecm_ipa_ctx)) {
		ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n");
		return;
	}
@@ -1144,7 +1144,7 @@ static void ecm_ipa_tx_complete_notify
		skb->len, skb->protocol,
		atomic_read(&ecm_ipa_ctx->outstanding_pkts));

	if (evt != IPA_WRITE_DONE) {
	if (unlikely(evt != IPA_WRITE_DONE)) {
		ECM_IPA_ERROR("unsupported event on Tx callback\n");
		return;
	}
+5 −5
Original line number Diff line number Diff line
@@ -800,7 +800,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
	case ODU_BRIDGE_MODE_ROUTER:
		/* Router mode - pass skb to IPA */
		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
		if (res) {
		if (unlikely(res)) {
			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
			goto out;
		}
@@ -813,7 +813,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
		    ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) {
			ODU_BRIDGE_DBG_LOW("QMI packet\n");
			skb_copied = skb_clone(skb, GFP_KERNEL);
			if (!skb_copied) {
			if (unlikely(!skb_copied)) {
				ODU_BRIDGE_ERR("No memory\n");
				return -ENOMEM;
			}
@@ -834,13 +834,13 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
			ODU_BRIDGE_DBG_LOW(
				"Multicast pkt, send to APPS and IPA\n");
			skb_copied = skb_clone(skb, GFP_KERNEL);
			if (!skb_copied) {
			if (unlikely(!skb_copied)) {
				ODU_BRIDGE_ERR("No memory\n");
				return -ENOMEM;
			}

			res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
			if (res) {
			if (unlikely(res)) {
				ODU_BRIDGE_DBG("tx dp failed %d\n", res);
				dev_kfree_skb(skb_copied);
				goto out;
@@ -855,7 +855,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
		}

		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
		if (res) {
		if (unlikely(res)) {
			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
			goto out;
		}
+6 −6
Original line number Diff line number Diff line
@@ -940,7 +940,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
	}

	ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
	if (ret) {
	if (unlikely(ret)) {
		RNDIS_IPA_DEBUG("Failed activate PM client\n");
		netif_stop_queue(net);
		goto fail_pm_activate;
@@ -959,7 +959,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
	skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
	trace_rndis_tx_dp(skb->protocol);
	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
	if (ret) {
	if (unlikely(ret)) {
		RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
		goto fail_tx_packet;
	}
@@ -1006,7 +1006,7 @@ static void rndis_ipa_tx_complete_notify(

	ret = 0;
	NULL_CHECK_RETVAL(private);
	if (ret)
	if (unlikely(ret))
		return;

	trace_rndis_status_rcvd(skb->protocol);
@@ -1120,7 +1120,7 @@ static void rndis_ipa_packet_receive_notify(
		return;
	}

	if (evt != IPA_RECEIVE)	{
	if (unlikely(evt != IPA_RECEIVE)) {
		RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n");
		return;
	}
@@ -1140,7 +1140,7 @@ static void rndis_ipa_packet_receive_notify(

	trace_rndis_netif_ni(skb->protocol);
	result = netif_rx_ni(skb);
	if (result)
	if (unlikely(result))
		RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
	rndis_ipa_ctx->net->stats.rx_packets++;
	rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
@@ -1817,7 +1817,7 @@ static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
		struct sk_buff *new_skb = skb_copy_expand(skb,
			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
		if (!new_skb) {
		if (unlikely(!new_skb)) {
			RNDIS_IPA_ERROR("no memory for skb expand\n");
			return skb;
		}
+42 −38
Original line number Diff line number Diff line
@@ -344,7 +344,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
	for (i = 0; i < num_desc; i++) {
		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
					   GFP_ATOMIC);
		if (!tx_pkt) {
		if (unlikely(!tx_pkt)) {
			IPAERR("failed to alloc tx wrapper\n");
			result = -ENOMEM;
			goto failure;
@@ -358,8 +358,8 @@ int ipa3_send(struct ipa3_sys_context *sys,

		/* populate tag field */
		if (desc[i].is_tag_status) {
			if (ipa_populate_tag_field(&desc[i], tx_pkt,
				&tag_pyld_ret)) {
			if (unlikely(ipa_populate_tag_field(&desc[i], tx_pkt,
				&tag_pyld_ret))) {
				IPAERR("Failed to populate tag field\n");
				result = -EFAULT;
				goto failure_dma_map;
@@ -399,7 +399,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
				tx_pkt->no_unmap_dma = true;
			}
		}
		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
		if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
			tx_pkt->mem.phys_base))) {
			IPAERR("failed to do dma map.\n");
			result = -EFAULT;
			goto failure_dma_map;
@@ -450,7 +451,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
			gsi_xfer, true);
	if (result != GSI_STATUS_SUCCESS) {
	if (unlikely(result != GSI_STATUS_SUCCESS)) {
		IPAERR_RL("GSI xfer failed.\n");
		result = -EFAULT;
		goto failure;
@@ -1490,7 +1491,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		return -EINVAL;
	}

	if (skb->len == 0) {
	if (unlikely(skb->len == 0)) {
		IPAERR("packet size is 0\n");
		return -EINVAL;
	}
@@ -1506,7 +1507,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
	 */
	if (IPA_CLIENT_IS_CONS(dst)) {
		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
		if (-1 == src_ep_idx) {
		if (unlikely(-1 == src_ep_idx)) {
			IPAERR("Client %u is not mapped\n",
				IPA_CLIENT_APPS_LAN_PROD);
			goto fail_gen;
@@ -1514,7 +1515,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		dst_ep_idx = ipa3_get_ep_mapping(dst);
	} else {
		src_ep_idx = ipa3_get_ep_mapping(dst);
		if (-1 == src_ep_idx) {
		if (unlikely(-1 == src_ep_idx)) {
			IPAERR("Client %u is not mapped\n", dst);
			goto fail_gen;
		}
@@ -1526,7 +1527,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,

	sys = ipa3_ctx->ep[src_ep_idx].sys;

	if (!sys || !sys->ep->valid) {
	if (unlikely(!sys || !sys->ep->valid)) {
		IPAERR_RL("pipe %d not valid\n", src_ep_idx);
		goto fail_pipe_not_valid;
	}
@@ -1547,7 +1548,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH)
		max_desc -= gsi_ep->prefetch_threshold;
	if (num_frags + 3 > max_desc) {
		if (skb_linearize(skb)) {
		if (unlikely(skb_linearize(skb))) {
			IPAERR("Failed to linear skb with %d frags\n",
				num_frags);
			goto fail_gen;
@@ -1561,7 +1562,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
		 * 1 desc for each frag
		 */
		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
		if (!desc) {
		if (unlikely(!desc)) {
			IPAERR("failed to alloc desc array\n");
			goto fail_gen;
		}
@@ -1623,7 +1624,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
			desc[skb_idx].callback = NULL;
		}

		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
		if (unlikely(ipa3_send(sys, num_frags + data_idx,
		    desc, true))) {
			IPAERR_RL("fail to send skb %pK num_frags %u SWP\n",
				skb, num_frags);
			goto fail_send;
@@ -1654,7 +1656,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
			desc[data_idx].dma_address = meta->dma_address;
		}
		if (num_frags == 0) {
			if (ipa3_send(sys, data_idx + 1, desc, true)) {
			if (unlikely(ipa3_send(sys, data_idx + 1,
				 desc, true))) {
				IPAERR("fail to send skb %pK HWP\n", skb);
				goto fail_mem;
			}
@@ -1673,8 +1676,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
			desc[data_idx+f].user2 = desc[data_idx].user2;
			desc[data_idx].callback = NULL;

			if (ipa3_send(sys, num_frags + data_idx + 1,
				desc, true)) {
			if (unlikely(ipa3_send(sys, num_frags + data_idx + 1,
			    desc, true))) {
				IPAERR("fail to send skb %pK num_frags %u\n",
					skb, num_frags);
				goto fail_mem;
@@ -1729,26 +1732,27 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
begin:
	while (1) {
		next = (curr + 1) % sys->repl->capacity;
		if (next == atomic_read(&sys->repl->head_idx))
		if (unlikely(next == atomic_read(&sys->repl->head_idx)))
			goto fail_kmem_cache_alloc;

		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
					   flag);
		if (!rx_pkt)
		if (unlikely(!rx_pkt))
			goto fail_kmem_cache_alloc;

		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
		rx_pkt->sys = sys;

		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
		if (rx_pkt->data.skb == NULL)
		if (unlikely(rx_pkt->data.skb == NULL))
			goto fail_skb_alloc;

		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
						     sys->rx_buff_sz,
						     DMA_FROM_DEVICE);
		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
		if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
		    rx_pkt->data.dma_addr))) {
			pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n",
			       __func__, (void *)rx_pkt->data.dma_addr,
			       ptr, sys);
@@ -1802,8 +1806,8 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
			rx_pkt->page_data.page, 0,
			rx_pkt->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev,
		rx_pkt->page_data.dma_addr)) {
	if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
		rx_pkt->page_data.dma_addr))) {
		pr_err_ratelimited("%s dma map fail %pK for %pK\n",
			__func__, (void *)rx_pkt->page_data.dma_addr,
			rx_pkt->page_data.page);
@@ -1829,7 +1833,7 @@ static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)

	for (curr = 0; curr < sys->repl->capacity; curr++) {
		rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
		if (!rx_pkt) {
		if (unlikely(!rx_pkt)) {
			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
			ipa_assert();
			break;
@@ -1877,7 +1881,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
			rx_pkt = ipa3_alloc_rx_pkt_page(flag, true);
			if (!rx_pkt && flag == GFP_ATOMIC)
				break;
			else if (!rx_pkt)
			else if (unlikely(!rx_pkt))
				goto fail_kmem_cache_alloc;
			rx_pkt->sys = sys;
		}
@@ -1901,7 +1905,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
			if (unlikely(ret != GSI_STATUS_SUCCESS)) {
				/* we don't expect this will happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				ipa_assert();
@@ -1913,7 +1917,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
			gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
	if (likely(ret == GSI_STATUS_SUCCESS)) {
		/* ensure write is done before setting head index */
		mb();
		atomic_set(&sys->repl->head_idx, curr);
@@ -1971,7 +1975,7 @@ static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
				&gsi_xfer_elem_one, true);

			if (ret) {
			if (unlikely(ret)) {
				IPAERR("failed to provide buffer: %d\n", ret);
				goto fail_provide_rx_buffer;
			}
@@ -3124,14 +3128,14 @@ static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
	}

	if (notify->veid >= GSI_VEID_MAX) {
	if (unlikely(notify->veid >= GSI_VEID_MAX)) {
		WARN_ON(1);
		return NULL;
	}

	/*Assesrt when WAN consumer channel receive EOB event*/
	if (notify->evt_id == GSI_CHAN_EVT_EOB &&
		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
	if (unlikely(notify->evt_id == GSI_CHAN_EVT_EOB &&
		sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)) {
		IPAERR("EOB event received on WAN consumer channel\n");
		ipa_assert();
	}
@@ -3255,13 +3259,13 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	struct ipa3_sys_context *coal_sys;
	int ipa_ep_idx;

	if (!notify) {
	if (unlikely(!notify)) {
		IPAERR_RL("gsi_chan_xfer_notify is null\n");
		return;
	}
	rx_skb = handle_skb_completion(notify, true);

	if (rx_skb) {
	if (likely(rx_skb)) {
		sys->pyld_hdlr(rx_skb, sys);

		/* For coalescing, we have 2 transfer rings to replenish */
@@ -3269,7 +3273,7 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
			ipa_ep_idx = ipa3_get_ep_mapping(
					IPA_CLIENT_APPS_WAN_CONS);

			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
			if (unlikely(ipa_ep_idx == IPA_EP_NOT_ALLOCATED)) {
				IPAERR("Invalid client.\n");
				return;
			}
@@ -3329,8 +3333,8 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
					 */
					ipa_ep_idx = ipa3_get_ep_mapping(
						IPA_CLIENT_APPS_WAN_CONS);
					if (ipa_ep_idx ==
						IPA_EP_NOT_ALLOCATED) {
					if (unlikely(ipa_ep_idx ==
						IPA_EP_NOT_ALLOCATED)) {
						IPAERR("Invalid client.\n");
						return;
					}
@@ -3366,8 +3370,8 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
				 */
				ipa_ep_idx = ipa3_get_ep_mapping(
						IPA_CLIENT_APPS_WAN_CONS);
				if (ipa_ep_idx ==
					IPA_EP_NOT_ALLOCATED) {
				if (unlikely(ipa_ep_idx ==
					IPA_EP_NOT_ALLOCATED)) {
					IPAERR("Invalid client.\n");
					return;
				}
@@ -4155,7 +4159,7 @@ static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_sys_context *sys;

	if (!notify) {
	if (unlikely(!notify)) {
		IPAERR("gsi notify is NULL.\n");
		return;
	}
@@ -4187,7 +4191,7 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
	struct ipa3_sys_context *sys;

	if (!notify) {
	if (unlikely(!notify)) {
		IPAERR("gsi notify is NULL.\n");
		return;
	}
Loading