Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 57964267 authored by Praveen Kurapati's avatar Praveen Kurapati
Browse files

msm: ipa3: Add change to retry memory allocation on failure



Adding changes to retry with the GFP_KERNEL flag when memory allocation
fails with the GFP_ATOMIC flag. Panic the device if memory allocation
still fails after the maximum number of retries.

Change-Id: Ic9ad827757de92715be5acc5645b7e860b4bd0e4
Signed-off-by: default avatarPraveen Kurapati <pkurapat@codeaurora.org>
parent 21cd9556
Loading
Loading
Loading
Loading
+31 −14
Original line number Diff line number Diff line
@@ -89,6 +89,8 @@

#define IPA_QMAP_ID_BYTE 0

#define IPA_MEM_ALLOC_RETRY 5

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -4876,6 +4878,33 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
	return result;
}

/*
 * ipa3_ring_alloc() - allocate DMA-coherent memory for a GSI ring,
 * retrying on failure.
 * @dev: device the allocation is made against
 * @size: number of bytes to allocate
 * @dma_handle: out parameter receiving the DMA address of the allocation
 * @gfp: initial allocation flags supplied by the caller
 *
 * Retries up to IPA_MEM_ALLOC_RETRY times with the caller's flags. If the
 * allocation still fails and the caller passed GFP_ATOMIC, exactly one
 * further attempt is made with GFP_KERNEL (retry_cnt is not reset), after
 * which ipa_assert() fires.
 *
 * Return: virtual address of the allocation, or NULL if ipa_assert()
 * returns without panicking the device.
 */
static void *ipa3_ring_alloc(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *va_addr;
	int retry_cnt = 0;

alloc:
	va_addr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!va_addr) {
		if (retry_cnt < IPA_MEM_ALLOC_RETRY) {
			IPADBG("Fail to dma alloc retry cnt = %d\n",
				retry_cnt);
			retry_cnt++;
			goto alloc;
		}

		/*
		 * NOTE(review): GFP_KERNEL may sleep; this fallback assumes
		 * the GFP_ATOMIC caller is not actually in atomic context by
		 * the time all retries are exhausted - confirm against the
		 * callers before relying on it.
		 */
		if (gfp == GFP_ATOMIC) {
			gfp = GFP_KERNEL;
			goto alloc;
		}
		/* size is size_t: %zu is required; %u was a printk format
		 * specifier mismatch.
		 */
		IPAERR("fail to dma alloc %zu bytes\n", size);
		ipa_assert();
	}

	return va_addr;
}

static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
	u32 ring_size, gfp_t mem_flag)
{
@@ -4892,13 +4921,8 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
	gsi_evt_ring_props.ring_len = ring_size;
	gsi_evt_ring_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
		ipa3_ring_alloc(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
		&evt_dma_addr, mem_flag);
	if (!gsi_evt_ring_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_evt_ring_props.ring_len);
		return -ENOMEM;
	}
	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;

	/* copy mem info */
@@ -5006,14 +5030,8 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
	gsi_channel_props.ring_len = ring_size;

	gsi_channel_props.ring_base_vaddr =
		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
		ipa3_ring_alloc(ipa3_ctx->pdev, gsi_channel_props.ring_len,
			&dma_addr, mem_flag);
	if (!gsi_channel_props.ring_base_vaddr) {
		IPAERR("fail to dma alloc %u bytes\n",
			gsi_channel_props.ring_len);
		result = -ENOMEM;
		goto fail_alloc_channel_ring;
	}
	gsi_channel_props.ring_base_addr = dma_addr;

	/* copy mem info */
@@ -5091,7 +5109,6 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
	dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.chan_ring_len,
			ep->gsi_mem_info.chan_ring_base_vaddr,
			ep->gsi_mem_info.chan_ring_base_addr);
fail_alloc_channel_ring:
fail_get_gsi_ep_info:
	if (ep->gsi_evt_ring_hdl != ~0) {
		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);