Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d5cee9fd authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: Page recycling on WAN DL data path"

parents 3baf7ec3 3cd665b5
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -6181,6 +6181,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
	ipa3_ctx->ipa_wan_skb_page = resource_p->ipa_wan_skb_page;
	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
	ipa3_ctx->ee = resource_p->ee;
@@ -6783,6 +6784,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
	ipa_drv_res->platform_type = 0;
	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
	ipa_drv_res->ipa_wdi2 = false;
	ipa_drv_res->ipa_wan_skb_page = false;
	ipa_drv_res->ipa_wdi2_over_gsi = false;
	ipa_drv_res->ipa_wdi3_over_gsi = false;
	ipa_drv_res->ipa_mhi_dynamic_config = false;
@@ -6905,6 +6907,13 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
			ipa_drv_res->ipa_wdi2
			? "True" : "False");

	ipa_drv_res->ipa_wan_skb_page =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,wan-use-skb-page");
	IPADBG(": Use skb page = %s\n",
			ipa_drv_res->ipa_wan_skb_page
			? "True" : "False");

	ipa_drv_res->ipa_fltrt_not_hashable =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-fltrt-not-hashable");
+385 −43
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
#include <net/sock.h>
#include "ipa_i.h"
#include "ipa_trace.h"
#include "ipahal/ipahal.h"
@@ -16,6 +17,7 @@

#define IPA_WAN_AGGR_PKT_CNT 5
#define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
#define IPA_WAN_PAGE_ORDER 3
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 1010
@@ -88,10 +90,14 @@ static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys);
static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(gfp_t flag,
	bool is_tmp_alloc);
static void ipa3_wq_handle_rx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify);
static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
		struct gsi_chan_xfer_notify *notify, uint32_t num);
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
				struct gsi_chan_xfer_notify *notify);
@@ -1095,7 +1101,29 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
		}
	}

	if (ep->sys->repl_hdlr == ipa3_replenish_rx_page_recycle) {
		ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
		if (!ep->sys->repl) {
			IPAERR("failed to alloc repl for client %d\n",
					sys_in->client);
			result = -ENOMEM;
			goto fail_gen2;
		}
		atomic_set(&ep->sys->repl->pending, 0);
		ep->sys->repl->capacity = (ep->sys->rx_pool_sz + 1) * 2;

		ep->sys->repl->cache = kcalloc(ep->sys->repl->capacity,
				sizeof(void *), GFP_KERNEL);
		atomic_set(&ep->sys->repl->head_idx, 0);
		atomic_set(&ep->sys->repl->tail_idx, 0);
		ipa3_replenish_rx_page_cache(ep->sys);
	}

	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
		if (IPA_CLIENT_IS_WAN_CONS(sys_in->client) &&
			ipa3_ctx->ipa_wan_skb_page) {
			ipa3_replenish_rx_page_recycle(ep->sys);
		} else
			ipa3_replenish_rx_cache(ep->sys);
		for (i = 0; i < GSI_VEID_MAX; i++)
			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
@@ -1754,6 +1782,159 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
	}
}

/**
 * ipa3_alloc_rx_pkt_page() - allocate one page-backed Rx packet wrapper
 * @flag: gfp flags used for both the wrapper and the page allocation
 * @is_tmp_alloc: true when this is a temporary (non-recycled) buffer;
 *	recorded in page_data so the completion path can tell a pool page
 *	from a one-shot allocation
 *
 * Backs the wrapper with a high-order page (PAGE_SIZE << IPA_WAN_PAGE_ORDER)
 * and DMA-maps it for device writes.
 *
 * Return: the wrapper on success, NULL on allocation or mapping failure.
 */
static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
	gfp_t flag, bool is_tmp_alloc)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;

	/* never dip into emergency memory reserves for Rx buffers */
	flag |= __GFP_NOMEMALLOC;
	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
		flag);
	if (!rx_pkt)
		return NULL;

	rx_pkt->len = PAGE_SIZE << IPA_WAN_PAGE_ORDER;
	rx_pkt->page_data.page = __dev_alloc_pages(flag,
		IPA_WAN_PAGE_ORDER);
	if (!rx_pkt->page_data.page)
		goto fail_page_alloc;

	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
			rx_pkt->page_data.page, 0,
			rx_pkt->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ipa3_ctx->pdev,
		rx_pkt->page_data.dma_addr)) {
		pr_err_ratelimited("%s dma map fail %pK for %pK\n",
			__func__, (void *)rx_pkt->page_data.dma_addr,
			rx_pkt->page_data.page);
		goto fail_dma_mapping;
	}

	/* direct bool assignment replaces the redundant if/else */
	rx_pkt->page_data.is_tmp_alloc = is_tmp_alloc;
	return rx_pkt;

fail_dma_mapping:
	__free_pages(rx_pkt->page_data.page, IPA_WAN_PAGE_ORDER);
fail_page_alloc:
	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
	return NULL;
}

/*
 * Pre-populate the whole page-recycling pool for @sys: one page-backed
 * wrapper per cache slot. Runs once at pipe setup with GFP_KERNEL;
 * asserts and stops on the first allocation failure.
 */
static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *wrapper;
	u32 i;

	for (i = 0; i < sys->repl->capacity; i++) {
		wrapper = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
		if (!wrapper) {
			IPAERR("ipa3_alloc_rx_pkt_page fails\n");
			ipa_assert();
			break;
		}
		wrapper->sys = sys;
		sys->repl->cache[i] = wrapper;
	}
}

/*
 * ipa3_replenish_rx_page_recycle() - refill the GSI Rx ring with page buffers
 * @sys: system pipe context whose ring is being replenished
 *
 * Walks the recycle pool starting at head_idx, reusing any page whose
 * refcount is back to 1 (i.e. the network stack has released it) and
 * falling back to a fresh temporary allocation otherwise. Queues the
 * transfer elements to GSI in batches and rings the doorbell once at
 * the end. Runs under sys->spinlock; schedules delayed work if the
 * pool could not be filled completely.
 */
static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	u32 curr;
	int idx = 0;
	struct page *cur_page;
	gfp_t flag;

	/* start replenish only when buffers go lower than the threshold */
	if (sys->rx_pool_sz - sys->len < IPA_REPL_XFER_THRESH)
		return;

	/* gfp_any(): GFP_ATOMIC in atomic context, GFP_KERNEL otherwise */
	flag = gfp_any();
	spin_lock_bh(&sys->spinlock);
	rx_len_cached = sys->len;
	curr = atomic_read(&sys->repl->head_idx);

	while (rx_len_cached < sys->rx_pool_sz) {
		cur_page = sys->repl->cache[curr]->page_data.page;
		/*
		 * refcount == 1 means only the pool holds the page, so it
		 * is idle and can be handed back to the hardware; take an
		 * extra ref to mark it in-flight.
		 */
		if (page_ref_count(cur_page) == 1) {
			page_ref_inc(cur_page);
			rx_pkt = sys->repl->cache[curr];
			curr = (++curr == sys->repl->capacity) ? 0 : curr;
		} else {
			/*
			 * Could not find idle page at curr index.
			 * Allocate a new one.
			 */
			rx_pkt = ipa3_alloc_rx_pkt_page(flag, true);
			if (!rx_pkt && flag == GFP_ATOMIC)
				break;
			else if (!rx_pkt)
				goto fail_kmem_cache_alloc;
			rx_pkt->sys = sys;
		}

		dma_sync_single_for_device(ipa3_ctx->pdev,
			rx_pkt->page_data.dma_addr,
			rx_pkt->len, DMA_FROM_DEVICE);
		gsi_xfer_elem_array[idx].addr = rx_pkt->page_data.dma_addr;
		gsi_xfer_elem_array[idx].len = rx_pkt->len;
		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_array[idx].flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_array[idx].type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_array[idx].xfer_user_data = rx_pkt;
		rx_len_cached++;
		idx++;
		/*
		 * gsi_xfer_elem_array has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this will happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				ipa_assert();
				break;
			}
			idx = 0;
		}
	}
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
			gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
		/* ensure write is done before setting head index */
		mb();
		atomic_set(&sys->repl->head_idx, curr);
		sys->len = rx_len_cached;
	} else {
		/* we don't expect this will happen */
		IPAERR("failed to provide buffer: %d\n", ret);
		ipa_assert();
	}
	spin_unlock_bh(&sys->spinlock);

	/* pool still below target: retry shortly from the workqueue */
	if (rx_len_cached < sys->rx_pool_sz) {
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
			msecs_to_jiffies(1));
	}
	return;
fail_kmem_cache_alloc:
	/*
	 * NOTE(review): ipa_assert() fires while sys->spinlock is still
	 * held, and the xfers batched in gsi_xfer_elem_array so far are
	 * dropped without queueing — confirm this is acceptable given the
	 * assert is considered fatal.
	 */
	ipa_assert();
	spin_unlock_bh(&sys->spinlock);
}

static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
@@ -2253,6 +2434,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
	struct ipa3_rx_pkt_wrapper *r;
	u32 head;
	u32 tail;
	int i;

	/*
	 * buffers not consumed by gsi are cleaned up using cleanup callback
@@ -2271,16 +2453,33 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
	spin_unlock_bh(&sys->spinlock);

	if (sys->repl) {
		if (!ipa3_ctx->ipa_wan_skb_page) {
			head = atomic_read(&sys->repl->head_idx);
			tail = atomic_read(&sys->repl->tail_idx);
			while (head != tail) {
				rx_pkt = sys->repl->cache[head];
			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
					sys->rx_buff_sz, DMA_FROM_DEVICE);
				dma_unmap_single(ipa3_ctx->pdev,
					rx_pkt->data.dma_addr,
					sys->rx_buff_sz,
					DMA_FROM_DEVICE);
				sys->free_skb(rx_pkt->data.skb);
			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
					rx_pkt);
				head = (head + 1) % sys->repl->capacity;
			}
		} else {
			for (i = 0; i < sys->repl->capacity; i++) {
				rx_pkt = sys->repl->cache[i];
				dma_unmap_page(ipa3_ctx->pdev,
					rx_pkt->page_data.dma_addr,
					rx_pkt->len,
					DMA_FROM_DEVICE);
				__free_pages(rx_pkt->page_data.page,
					IPA_WAN_PAGE_ORDER);
				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
					rx_pkt);
			}
		}
		kfree(sys->repl->cache);
		kfree(sys->repl);
	}
@@ -2864,6 +3063,11 @@ static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
	spin_unlock_bh(&rx_pkt->sys->spinlock);
}

/*
 * free_rx_wrapper hook for the page-recycling WAN path. Intentionally a
 * no-op: pool wrappers stay owned by repl->cache, and page reuse is
 * driven by page_ref_count() in ipa3_replenish_rx_page_recycle(), so
 * there is nothing to release per completion.
 */
static void ipa3_recycle_rx_page_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
{
	/* no-op */
}

/**
 * handle_skb_completion()- Handle event completion EOB or EOT and prep the skb
 *
@@ -2943,6 +3147,88 @@ static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
	return rx_skb;
}

/**
 * handle_page_completion()- Handle event completion EOB or EOT
 * and prep the skb
 *
 * if eob: Set skb values, put rx_pkt at the end of the list and return NULL
 *
 * if eot: Set skb values, put skb at the end of the list. Then update the
 * length and put the page together to the frags while also
 * freeing and unmapping the corresponding rx pkt. Once finished
 * return the head_skb to be sent up the network stack.
 */
static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
		*notify, bool update_truesize)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
	struct sk_buff *rx_skb;
	struct list_head *head;
	struct ipa3_sys_context *sys;
	struct ipa_rx_page_data rx_page;

	sys = (struct ipa3_sys_context *) notify->chan_user_data;
	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
	/* copy by value: rx_pkt may be freed before rx_page is last used */
	rx_page = rx_pkt->page_data;

	spin_lock_bh(&rx_pkt->sys->spinlock);
	rx_pkt->sys->len--;
	spin_unlock_bh(&rx_pkt->sys->spinlock);

	/* TODO: truesize handle for EOB */
	if (update_truesize)
		IPAERR("update_truesize not supported\n");

	if (notify->veid >= GSI_VEID_MAX) {
		rx_pkt->sys->free_rx_wrapper(rx_pkt);
		/* drop the in-flight ref so the pool page is idle again */
		if (!rx_page.is_tmp_alloc)
			init_page_count(rx_page.page);
		/*
		 * NOTE(review): for is_tmp_alloc pages this path neither
		 * dma-unmaps nor frees the page — confirm whether that is a
		 * leak or handled elsewhere.
		 */
		IPAERR("notify->veid > GSI_VEID_MAX\n");
		return NULL;
	}

	head = &rx_pkt->sys->pending_pkts[notify->veid];

	INIT_LIST_HEAD(&rx_pkt->link);
	list_add_tail(&rx_pkt->link, head);

	/* Check added for handling LAN consumer packet without EOT flag */
	if (notify->evt_id == GSI_CHAN_EVT_EOT ||
		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
		/* zero-size head skb; payload is attached purely as frags */
		rx_skb = alloc_skb(0, GFP_ATOMIC);
		if (unlikely(!rx_skb)) {
			rx_pkt->sys->free_rx_wrapper(rx_pkt);
			if (!rx_page.is_tmp_alloc)
				init_page_count(rx_page.page);
			IPAERR("skb alloc failure\n");
			return NULL;
		}
	/* go over the list backward to save computations on updating length */
		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
			rx_page = rx_pkt->page_data;

			list_del(&rx_pkt->link);
			/* tmp allocs are one-shot: unmap; pool pages: sync */
			if (rx_page.is_tmp_alloc)
				dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
					rx_pkt->len, DMA_FROM_DEVICE);
			else
				dma_sync_single_for_cpu(ipa3_ctx->pdev,
					rx_page.dma_addr,
					rx_pkt->len, DMA_FROM_DEVICE);
			rx_pkt->sys->free_rx_wrapper(rx_pkt);

			/*
			 * NOTE(review): every frag is added with the same
			 * notify->bytes_xfered from the completion that
			 * triggered EOT — confirm this is correct when the
			 * pending list holds multiple descriptors.
			 */
			skb_add_rx_frag(rx_skb,
				skb_shinfo(rx_skb)->nr_frags,
				rx_page.page, 0,
				notify->bytes_xfered,
				PAGE_SIZE << IPA_WAN_PAGE_ORDER);
		}
	} else {
		return NULL;
	}
	return rx_skb;
}

static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	struct gsi_chan_xfer_notify *notify)
{
@@ -2977,7 +3263,7 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
	}
}

static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
		struct gsi_chan_xfer_notify *notify, uint32_t num)
{
	struct ipa3_sys_context *wan_def_sys;
@@ -2987,7 +3273,12 @@ static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
	/* non-coalescing case (SKB chaining enabled) */
	if (sys->ep->client != IPA_CLIENT_APPS_WAN_COAL_CONS) {
		for (i = 0; i < num; i++) {
			rx_skb = handle_skb_completion(&notify[i], false);
			if (!ipa3_ctx->ipa_wan_skb_page)
				rx_skb = handle_skb_completion(
					&notify[i], false);
			else
				rx_skb = handle_page_completion(
					&notify[i], false);

			/* this is always true for EOTs */
			if (rx_skb) {
@@ -3005,26 +3296,64 @@ static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
			skb_shinfo(prev_skb)->frag_list = NULL;
			sys->pyld_hdlr(first_skb, sys);
		}

	} else {
		if (!ipa3_ctx->ipa_wan_skb_page) {
			/* TODO: add chaining for coal case */
			for (i = 0; i < num; i++) {
				rx_skb = handle_skb_completion(
					&notify[i], false);
				if (rx_skb) {
					sys->pyld_hdlr(rx_skb, sys);
					/*
					 * For coalescing, we have 2 transfer
					 * rings to replenish
					 */
					ipa_ep_idx = ipa3_get_ep_mapping(
						IPA_CLIENT_APPS_WAN_CONS);
					if (ipa_ep_idx ==
						IPA_EP_NOT_ALLOCATED) {
						IPAERR("Invalid client.\n");
						return;
					}
					wan_def_sys =
						ipa3_ctx->ep[ipa_ep_idx].sys;
					wan_def_sys->repl_hdlr(wan_def_sys);
					sys->repl_hdlr(sys);
				}
			}
		} else {
			for (i = 0; i < num; i++) {
			rx_skb = handle_skb_completion(&notify[i], false);
				rx_skb = handle_page_completion(
					&notify[i], false);

				/* this is always true for EOTs */
				if (rx_skb) {
				sys->pyld_hdlr(rx_skb, sys);
					if (!first_skb)
						first_skb = rx_skb;

					if (prev_skb)
						skb_shinfo(prev_skb)->frag_list
							= rx_skb;

					prev_skb = rx_skb;
				}
			}
			if (prev_skb) {
				skb_shinfo(prev_skb)->frag_list = NULL;
				sys->pyld_hdlr(first_skb, sys);
				/*
			 * For coalescing, we have 2 transfer rings to replenish
				 * For coalescing, we have 2 transfer
				 * rings to replenish
				 */
				ipa_ep_idx = ipa3_get_ep_mapping(
						IPA_CLIENT_APPS_WAN_CONS);

				if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
				if (ipa_ep_idx ==
					IPA_EP_NOT_ALLOCATED) {
					IPAERR("Invalid client.\n");
					return;
				}
				wan_def_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
				wan_def_sys =
					ipa3_ctx->ep[ipa_ep_idx].sys;
				wan_def_sys->repl_hdlr(wan_def_sys);
			}
		}
@@ -3224,9 +3553,21 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
				IPA_GENERIC_AGGR_PKT_LIMIT;
			} else if (in->client == IPA_CLIENT_APPS_WAN_CONS ||
				in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
				if (ipa3_ctx->ipa_wan_skb_page
					&& in->napi_obj) {
					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
				sys->free_rx_wrapper = ipa3_free_rx_wrapper;
				sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
					sys->free_rx_wrapper =
						ipa3_recycle_rx_page_wrapper;
					sys->repl_hdlr =
						ipa3_replenish_rx_page_recycle;
					sys->rx_pool_sz =
						ipa3_ctx->wan_rx_ring_size;
				} else {
					sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
					sys->free_rx_wrapper =
						ipa3_free_rx_wrapper;
					sys->rx_pool_sz =
						ipa3_ctx->wan_rx_ring_size;
					if (nr_cpu_ids > 1) {
						sys->repl_hdlr =
						ipa3_fast_replenish_rx_cache;
@@ -3237,15 +3578,16 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
					if (in->napi_obj && in->recycle_enabled)
						sys->repl_hdlr =
						ipa3_replenish_rx_cache_recycle;
				}
				in->ipa_ep_cfg.aggr.aggr_sw_eof_active
						= true;
				if (apps_wan_cons_agg_gro_flag) {
				if (apps_wan_cons_agg_gro_flag)
					ipa3_set_aggr_limit(in, sys);
				} else {
					in->ipa_ep_cfg.aggr.aggr_byte_limit =
					IPA_GENERIC_AGGR_BYTE_LIMIT;
					in->ipa_ep_cfg.aggr.aggr_pkt_limit =
					IPA_GENERIC_AGGR_PKT_LIMIT;
				else {
					in->ipa_ep_cfg.aggr.aggr_byte_limit
						= IPA_GENERIC_AGGR_BYTE_LIMIT;
					in->ipa_ep_cfg.aggr.aggr_pkt_limit
						= IPA_GENERIC_AGGR_PKT_LIMIT;
				}
			}
		} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
@@ -4329,7 +4671,7 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
			break;

		trace_ipa3_rx_poll_num(num);
		ipa3_wq_rx_napi_chain(ep->sys, notify, num);
		ipa3_rx_napi_chain(ep->sys, notify, num);
		remain_aggr_weight -= num;

		trace_ipa3_rx_poll_cnt(ep->sys->len);
+6 −1
Original line number Diff line number Diff line
@@ -1122,7 +1122,10 @@ struct ipa3_desc {
 */
struct ipa3_rx_pkt_wrapper {
	struct list_head link;
	union {
		struct ipa_rx_data data;
		struct ipa_rx_page_data page_data;
	};
	u32 len;
	struct work_struct work;
	struct ipa3_sys_context *sys;
@@ -1888,6 +1891,7 @@ struct ipa3_context {
	atomic_t is_ssr;
	struct IpaHwOffloadStatsAllocCmdData_t
		gsi_info[IPA_HW_PROTOCOL_MAX];
	bool ipa_wan_skb_page;
};

struct ipa3_plat_drv_res {
@@ -1934,6 +1938,7 @@ struct ipa3_plat_drv_res {
	u32 secure_debug_check_action;
	bool ipa_endp_delay_wa;
	bool skip_ieob_mask_wa;
	bool ipa_wan_skb_page;
};

/**
+14 −0
Original line number Diff line number Diff line
@@ -797,6 +797,20 @@ struct ipa_rx_data {
	dma_addr_t dma_addr;
};

/**
 * struct ipa_rx_page_data - page-backed Rx buffer descriptor used by the
 * page-recycling WAN download path (selected via ipa_wan_skb_page);
 * attached to the skb as a frag instead of a linear skb buffer
 * @page: high-order page holding the received payload
 * @dma_addr: DMA address of this Rx packet
 * @is_tmp_alloc: true if the page is a one-shot temporary allocation,
 *	false if it belongs to the recycle pool (repl->cache)
 */
struct ipa_rx_page_data {
	struct page *page;
	dma_addr_t dma_addr;
	bool is_tmp_alloc;
};


/**
 * enum ipa_irq_type - IPA Interrupt Type
 * Used to register handlers for IPA interrupts
+4 −0
Original line number Diff line number Diff line
@@ -415,6 +415,10 @@ enum ipa_client_type {
	(client) == IPA_CLIENT_USB_DPL_CONS || \
	(client) == IPA_CLIENT_USB4_CONS)

/* True when @client is an APPS WAN consumer pipe (default or coalescing). */
#define IPA_CLIENT_IS_WAN_CONS(client) \
	((client) == IPA_CLIENT_APPS_WAN_CONS || \
	(client) == IPA_CLIENT_APPS_WAN_COAL_CONS)

#define IPA_CLIENT_IS_WLAN_CONS(client) \
	((client) == IPA_CLIENT_WLAN1_CONS || \
	(client) == IPA_CLIENT_WLAN2_CONS || \