
Commit a17d83f9 authored by qctecmdr, committed by Gerrit Code Review

Merge "msm: ipa: use lower order pages"

parents 1234f5d7 24278e36
+4 −2
@@ -1357,7 +1357,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		"lan_repl_rx_empty=%u\n"
 		"flow_enable=%u\n"
 		"flow_disable=%u\n"
-		"rx_page_drop_cnt=%u\n",
+		"rx_page_drop_cnt=%u\n"
+		"lower_order=%u\n",
 		ipa3_ctx->stats.tx_sw_pkts,
 		ipa3_ctx->stats.tx_hw_pkts,
 		ipa3_ctx->stats.tx_non_linear,
@@ -1374,7 +1375,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		ipa3_ctx->stats.lan_repl_rx_empty,
 		ipa3_ctx->stats.flow_enable,
 		ipa3_ctx->stats.flow_disable,
-		ipa3_ctx->stats.rx_page_drop_cnt
+		ipa3_ctx->stats.rx_page_drop_cnt,
+		ipa3_ctx->stats.lower_order
 		);
 	cnt += nbytes;

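Note on the hunks above: only punctuation moves between the removed and added format lines. C concatenates adjacent string literals into a single format string, so the trailing comma migrates from "rx_page_drop_cnt=%u\n" to the new "lower_order=%u\n" fragment, and each %u is consumed in order by the matching entry in the argument list. A minimal standalone sketch of the same pattern (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int rx_page_drop_cnt = 3;
	unsigned int lower_order = 7;

	/* Adjacent literals merge into one format string; each
	 * "name=%u\n" fragment pairs with one argument, in order. */
	printf("rx_page_drop_cnt=%u\n"
	       "lower_order=%u\n",
	       rx_page_drop_cnt, lower_order);
	return 0;
}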
+37 −13
@@ -2042,6 +2042,27 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
 	}
 }
 
+static struct page *ipa3_alloc_page(
+	gfp_t flag, u32 *page_order, bool try_lower)
+{
+	struct page *page = NULL;
+	u32 p_order = *page_order;
+
+	page = __dev_alloc_pages(flag, p_order);
+	/* We will only try 1 page order lower. */
+	if (unlikely(!page)) {
+		if (try_lower && p_order > 0) {
+			p_order = p_order - 1;
+			page = __dev_alloc_pages(flag, p_order);
+			if (likely(page))
+				ipa3_ctx->stats.lower_order++;
+		}
+	}
+	*page_order = p_order;
+	return page;
+}
+
+
 static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	gfp_t flag, bool is_tmp_alloc)
 {
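The helper added above concentrates the fallback policy in one place: request the configured order, and on failure retry exactly once at the next order down, bumping the lower_order counter and reporting the order actually used back through *page_order so later mapping and freeing see the true size. A userspace analogue of that contract (illustrative sketch only; 4096 stands in for PAGE_SIZE):

#include <stdlib.h>

/* Hypothetical userspace mirror of ipa3_alloc_page(): try order N,
 * fall back once to N-1, and report the order actually used. */
static void *alloc_order(unsigned int *order, int try_lower)
{
	void *buf = malloc((size_t)4096 << *order);

	if (!buf && try_lower && *order > 0) {
		*order -= 1;			/* one step lower, never more */
		buf = malloc((size_t)4096 << *order);
	}
	return buf;
}

int main(void)
{
	unsigned int order = 3;			/* IPA_WAN_PAGE_ORDER-like start */
	void *buf = alloc_order(&order, 1);

	/* On success, (4096 << order) is the real buffer size. */
	free(buf);
	return 0;
}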
@@ -2052,12 +2073,18 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 		flag);
 	if (unlikely(!rx_pkt))
 		return NULL;
-	rx_pkt->len = PAGE_SIZE << IPA_WAN_PAGE_ORDER;
-	rx_pkt->page_data.page = __dev_alloc_pages(flag,
-		IPA_WAN_PAGE_ORDER);
+
+	rx_pkt->page_data.page_order = IPA_WAN_PAGE_ORDER;
+	/* Try a lower order page for order 3 pages in case allocation fails. */
+	rx_pkt->page_data.page = ipa3_alloc_page(flag,
+				&rx_pkt->page_data.page_order,
+				(is_tmp_alloc && rx_pkt->page_data.page_order == 3));
+
 	if (unlikely(!rx_pkt->page_data.page))
 		goto fail_page_alloc;
 
+	rx_pkt->len = PAGE_SIZE << rx_pkt->page_data.page_order;
+
 	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
 			rx_pkt->page_data.page, 0,
 			rx_pkt->len, DMA_FROM_DEVICE);
@@ -2075,7 +2102,7 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	return rx_pkt;
 
 fail_dma_mapping:
-	__free_pages(rx_pkt->page_data.page, IPA_WAN_PAGE_ORDER);
+	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
fail_page_alloc:
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	return NULL;
@@ -2769,8 +2796,7 @@ static void free_rx_page(void *chan_user_data, void *xfer_user_data)
 	}
 	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
 		rx_pkt->len, DMA_FROM_DEVICE);
-	__free_pages(rx_pkt->page_data.page,
-		IPA_WAN_PAGE_ORDER);
+	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 }

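This and the remaining free-path hunks all change for the same reason: __free_pages() must receive the order the page was really allocated at. Once the fallback can hand back an order-2 page, the old hardcoded IPA_WAN_PAGE_ORDER would free at a higher order than was allocated and release pages the driver never owned. A sketch of the invariant, assuming standard kernel page-allocator semantics:

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Illustrative only: the order passed to __free_pages() must match the
 * order used at allocation time, which is why it is now carried in
 * ipa_rx_page_data rather than hardcoded. */
static void demo_order_matching(gfp_t flag)
{
	u32 order = 3;
	struct page *page = __dev_alloc_pages(flag, order);

	if (!page) {
		order = 2;			/* fallback path */
		page = __dev_alloc_pages(flag, order);
	}
	if (page)
		__free_pages(page, order);	/* same order, always */
}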
@@ -2821,8 +2847,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 					rx_pkt->page_data.dma_addr,
 					rx_pkt->len,
 					DMA_FROM_DEVICE);
-				__free_pages(rx_pkt->page_data.page,
-					IPA_WAN_PAGE_ORDER);
+				__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 			}
 			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
 				rx_pkt);
@@ -2842,7 +2867,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 					rx_pkt->len,
 					DMA_FROM_DEVICE);
 				__free_pages(rx_pkt->page_data.page,
-					IPA_WAN_PAGE_ORDER);
+					rx_pkt->page_data.page_order);
 				kmem_cache_free(
 					ipa3_ctx->rx_pkt_wrapper_cache,
 					rx_pkt);
@@ -3642,8 +3667,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 		} else {
 			dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
 					rx_pkt->len, DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page,
-							IPA_WAN_PAGE_ORDER);
+			__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 		}
 		rx_pkt->sys->free_rx_wrapper(rx_pkt);
 		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
@@ -3671,7 +3695,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 					dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
 						rx_pkt->len, DMA_FROM_DEVICE);
 					__free_pages(rx_pkt->page_data.page,
-								IPA_WAN_PAGE_ORDER);
+								rx_pkt->page_data.page_order);
 				}
 				rx_pkt->sys->free_rx_wrapper(rx_pkt);
 			}
@@ -3696,7 +3720,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 				skb_shinfo(rx_skb)->nr_frags,
 				rx_page.page, 0,
 				size,
-				PAGE_SIZE << IPA_WAN_PAGE_ORDER);
+				PAGE_SIZE << rx_page.page_order);
 		}
 	} else {
 		return NULL;
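The hunk above is about accounting rather than freeing: the last argument of skb_add_rx_frag() is the fragment's truesize, the memory genuinely backing it, which feeds socket buffer accounting. With fallback pages in play it has to come from the recorded order, otherwise an order-2 page would be billed as order-3. A minimal sketch of the call (hypothetical wrapper around the standard skb_add_rx_frag() signature):

#include <linux/skbuff.h>

/* Hypothetical helper: attach a received page as an skb fragment,
 * reporting its true memory footprint from the per-page order. */
static void attach_rx_frag(struct sk_buff *skb, struct page *page,
			   unsigned int len, u32 page_order)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len,
			PAGE_SIZE << page_order);	/* truesize */
}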
+3 −0
@@ -506,11 +506,13 @@ enum ipa3_wdi_polling_mode {
  * @page: skb page
  * @dma_addr: DMA address of this Rx packet
  * @is_tmp_alloc: skb page from tmp_alloc or recycle_list
+ * @page_order: page order associated with the page.
  */
 struct ipa_rx_page_data {
 	struct page *page;
 	dma_addr_t dma_addr;
 	bool is_tmp_alloc;
+	u32 page_order;
 };
 
 struct ipa3_active_client_htable_entry {
@@ -1446,6 +1448,7 @@ struct ipa3_stats {
 	u32 tx_non_linear;
 	u32 rx_page_drop_cnt;
 	struct ipa3_page_recycle_stats page_recycle_stats[2];
+	u64 lower_order;
 };
 
 /* offset for each stats */