Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b1b0689 authored by Ravi Gummadidala
Browse files

msm: ipa: fix data path stall issue



Disable bottom halves when working with the pending TX packet
list per IPA system consumer EP. Without this change, we can
miss TX completions or receive TX completions out of order.

Change-Id: I183abf0c4c1bb0e2633c1018c7c0d910c918657e
Signed-off-by: Ravi Gummadidala <rgummadi@codeaurora.org>
parent 1b611297
Loading
Loading
Loading
Loading
+24 −18
Original line number Diff line number Diff line
@@ -53,8 +53,10 @@ static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
	int i;

	for (i = 0; i < cnt; i++) {
		if (unlikely(list_empty(&sys->head_desc_list)))
		if (unlikely(list_empty(&sys->head_desc_list))) {
			WARN_ON(1);
			return;
		}
		tx_pkt_expected = list_first_entry(&sys->head_desc_list,
						   struct ipa_tx_pkt_wrapper,
						   link);
@@ -87,14 +89,17 @@ static void ipa_wq_write_done_status(int src_pipe)
		return;
	}

	spin_lock(&sys->spinlock);
	if (unlikely(list_empty(&sys->head_desc_list)))
	spin_lock_bh(&sys->spinlock);
	if (unlikely(list_empty(&sys->head_desc_list))) {
		WARN_ON(1);
		spin_unlock_bh(&sys->spinlock);
		return;
	}
	tx_pkt_expected = list_first_entry(&sys->head_desc_list,
					   struct ipa_tx_pkt_wrapper,
					   link);
	ipa_wq_write_done_common(sys, tx_pkt_expected->cnt);
	spin_unlock(&sys->spinlock);
	spin_unlock_bh(&sys->spinlock);
}

/**
@@ -120,9 +125,9 @@ static void ipa_wq_write_done(struct work_struct *work)
	cnt = tx_pkt->cnt;
	sys = tx_pkt->sys;

	spin_lock(&sys->spinlock);
	spin_lock_bh(&sys->spinlock);
	ipa_wq_write_done_common(sys, cnt);
	spin_unlock(&sys->spinlock);
	spin_unlock_bh(&sys->spinlock);
}

static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
@@ -145,9 +150,9 @@ static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
		if (iov.addr == 0)
			break;

		spin_lock(&sys->spinlock);
		spin_lock_bh(&sys->spinlock);
		ipa_wq_write_done_common(sys, 1);
		spin_unlock(&sys->spinlock);
		spin_unlock_bh(&sys->spinlock);
		cnt++;
	};

@@ -314,7 +319,7 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,

	INIT_WORK(&tx_pkt->work, ipa_wq_write_done);

	spin_lock(&sys->spinlock);
	spin_lock_bh(&sys->spinlock);
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
			sps_flags);
@@ -323,13 +328,13 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
		goto fail_sps_send;
	}

	spin_unlock(&sys->spinlock);
	spin_unlock_bh(&sys->spinlock);

	return 0;

fail_sps_send:
	list_del(&tx_pkt->link);
	spin_unlock(&sys->spinlock);
	spin_unlock_bh(&sys->spinlock);
	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
		dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce,
				dma_address);
@@ -384,7 +389,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
	transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, &dma_addr);
	transfer.iovec_phys = dma_addr;
	transfer.iovec_count = num_desc;
	spin_lock(&sys->spinlock);
	spin_lock_bh(&sys->spinlock);
	if (!transfer.iovec) {
		IPAERR("fail to alloc DMA mem for sps xfr buff\n");
		goto failure_coherent;
@@ -491,7 +496,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
		goto failure;
	}

	spin_unlock(&sys->spinlock);
	spin_unlock_bh(&sys->spinlock);
	return 0;

failure:
@@ -518,7 +523,7 @@ failure:
		dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
				  transfer.iovec_phys);
failure_coherent:
	spin_unlock(&sys->spinlock);
	spin_unlock_bh(&sys->spinlock);
	return -EFAULT;
}

@@ -1913,8 +1918,10 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
	struct sk_buff *rx_skb;

	if (unlikely(list_empty(&sys->head_desc_list)))
	if (unlikely(list_empty(&sys->head_desc_list))) {
		WARN_ON(1);
		return;
	}
	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
					   struct ipa_rx_pkt_wrapper,
					   link);
@@ -1932,7 +1939,6 @@ static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)

	ipa_replenish_rx_cache(sys);
	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);

}

static void ipa_wq_rx_avail(struct work_struct *work)