Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 99c65f25 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa3: dynamic change ul/dl ring/aggre/uc-threshold"

parents 63fc67d3 8c655c05
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -58,6 +58,11 @@

#define IPA_SUSPEND_BUSY_TIMEOUT (msecs_to_jiffies(10))

#define DEFAULT_MPM_RING_SIZE_UL 6
#define DEFAULT_MPM_RING_SIZE_DL 16
#define DEFAULT_MPM_TETH_AGGR_SIZE 24
#define DEFAULT_MPM_UC_THRESH_SIZE 4

/*
 * The following for adding code (ie. for EMULATION) not found on x86.
 */
@@ -6559,6 +6564,12 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
	ipa3_ctx->do_ram_collection_on_crash =
		resource_p->do_ram_collection_on_crash;
	ipa3_ctx->lan_rx_napi_enable = resource_p->lan_rx_napi_enable;
	ipa3_ctx->mpm_ring_size_ul_cache = DEFAULT_MPM_RING_SIZE_UL;
	ipa3_ctx->mpm_ring_size_ul = DEFAULT_MPM_RING_SIZE_UL;
	ipa3_ctx->mpm_ring_size_dl_cache = DEFAULT_MPM_RING_SIZE_DL;
	ipa3_ctx->mpm_ring_size_dl = DEFAULT_MPM_RING_SIZE_DL;
	ipa3_ctx->mpm_teth_aggr_size = DEFAULT_MPM_TETH_AGGR_SIZE;
	ipa3_ctx->mpm_uc_thresh = DEFAULT_MPM_UC_THRESH_SIZE;

	if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
		if (ipa_is_mem_dump_allowed())
+143 −0
Original line number Diff line number Diff line
@@ -378,6 +378,129 @@ static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf,
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}

/* debugfs read: report the configured MHIP DL ring size to userspace. */
static ssize_t ipa3_read_mpm_ring_size_dl(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
			scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
				"IPA_MPM_RING_SIZE_DL = %d\n",
				ipa3_ctx->mpm_ring_size_dl));
}

/* debugfs read: report the configured MHIP UL ring size to userspace. */
static ssize_t ipa3_read_mpm_ring_size_ul(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
			scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
				"IPA_MPM_RING_SIZE_UL = %d\n",
				ipa3_ctx->mpm_ring_size_ul));
}

/* debugfs read: report the uC flow-control threshold to userspace. */
static ssize_t ipa3_read_mpm_uc_thresh(struct file *file, char __user *ubuf,
	size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
			scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
				"IPA_MPM_UC_THRESH = %d\n",
				ipa3_ctx->mpm_uc_thresh));
}

/* debugfs read: report the tethering aggregation byte size to userspace. */
static ssize_t ipa3_read_mpm_teth_aggr_size(struct file *file,
	char __user *ubuf, size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
			scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
				"IPA_MPM_TETH_AGGR_SIZE = %d\n",
				ipa3_ctx->mpm_teth_aggr_size));
}

/*
 * debugfs write: set the MHIP DL ring size.
 * Valid range is 1..IPA_MPM_MAX_RING_LEN; out-of-range values are logged
 * and ignored, and the write still consumes 'count' (debugfs convention).
 */
static ssize_t ipa3_write_mpm_ring_size_dl(struct file *file,
	const char __user *buf,
	size_t count, loff_t *ppos)
{
	s8 val = 0;
	int rc = kstrtos8_from_user(buf, count, 0, &val);

	if (rc)
		return rc;

	/* s8 already caps the parsed value at 127 */
	if (val < 1 || val > IPA_MPM_MAX_RING_LEN) {
		IPAERR("Invalid dl ring size =%d: range is 1 to %d\n",
			val, IPA_MPM_MAX_RING_LEN);
		return count;
	}

	ipa3_ctx->mpm_ring_size_dl = val;
	return count;
}

/*
 * debugfs write: set the MHIP UL ring size.
 * Valid range is 1..IPA_MPM_MAX_RING_LEN; out-of-range values are logged
 * and ignored, and the write still consumes 'count' (debugfs convention).
 */
static ssize_t ipa3_write_mpm_ring_size_ul(struct file *file,
	const char __user *buf,
	size_t count, loff_t *ppos)
{
	s8 val = 0;
	int rc = kstrtos8_from_user(buf, count, 0, &val);

	if (rc)
		return rc;

	/* s8 already caps the parsed value at 127 */
	if (val < 1 || val > IPA_MPM_MAX_RING_LEN) {
		IPAERR("Invalid ul ring size =%d: range is only 1 to %d\n",
			val, IPA_MPM_MAX_RING_LEN);
		return count;
	}

	ipa3_ctx->mpm_ring_size_ul = val;
	return count;
}

/*
 * debugfs write: set the uC flow-control threshold.
 * Valid range is 1..IPA_MPM_MAX_UC_THRESH; out-of-range values are logged
 * and ignored, and the write still consumes 'count' (debugfs convention).
 */
static ssize_t ipa3_write_mpm_uc_thresh(struct file *file,
	const char __user *buf,
	size_t count, loff_t *ppos)
{
	s8 option = 0;
	int ret;

	ret = kstrtos8_from_user(buf, count, 0, &option);
	if (ret)
		return ret;
	/* as option type is s8, max it can take is 127 */
	if ((option > 0) && (option <= IPA_MPM_MAX_UC_THRESH))
		ipa3_ctx->mpm_uc_thresh = option;
	else
		/* fixed copy-paste: message previously said "ul ring size" */
		IPAERR("Invalid uc thresh =%d: range is only 1 to %d\n",
			option, IPA_MPM_MAX_UC_THRESH);
	return count;
}

/*
 * debugfs write: set the tethering aggregation byte size.
 * Valid range is 1..IPA_MAX_TETH_AGGR_BYTE_LIMIT; out-of-range values are
 * logged and ignored, and the write still consumes 'count' (debugfs
 * convention).
 */
static ssize_t ipa3_write_mpm_teth_aggr_size(struct file *file,
	const char __user *buf,
	size_t count, loff_t *ppos)
{
	s8 option = 0;
	int ret;

	ret = kstrtos8_from_user(buf, count, 0, &option);
	if (ret)
		return ret;
	/* as option type is s8, max it can take is 127 */
	if ((option > 0) && (option <= IPA_MAX_TETH_AGGR_BYTE_LIMIT))
		ipa3_ctx->mpm_teth_aggr_size = option;
	else
		/* fixed copy-paste: message previously said "ul ring size" */
		IPAERR("Invalid teth aggr size =%d: range is only 1 to %d\n",
			option, IPA_MAX_TETH_AGGR_BYTE_LIMIT);
	return count;
}

static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
		loff_t *ppos)
{
@@ -2570,6 +2693,26 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
			.read = ipa3_read_keep_awake,
			.write = ipa3_write_keep_awake,
		}
	}, {
		"mpm_ring_size_dl", IPA_READ_WRITE_MODE, NULL, {
			.read = ipa3_read_mpm_ring_size_dl,
			.write = ipa3_write_mpm_ring_size_dl,
		}
	}, {
		"mpm_ring_size_ul", IPA_READ_WRITE_MODE, NULL, {
			.read = ipa3_read_mpm_ring_size_ul,
			.write = ipa3_write_mpm_ring_size_ul,
		}
	}, {
		"mpm_uc_thresh", IPA_READ_WRITE_MODE, NULL, {
			.read = ipa3_read_mpm_uc_thresh,
			.write = ipa3_write_mpm_uc_thresh,
		}
	}, {
		"mpm_teth_aggr_size", IPA_READ_WRITE_MODE, NULL, {
			.read = ipa3_read_mpm_teth_aggr_size,
			.write = ipa3_write_mpm_teth_aggr_size,
		}
	}, {
		"holb", IPA_WRITE_ONLY_MODE, NULL, {
			.write = ipa3_write_ep_holb,
+16 −0
Original line number Diff line number Diff line
@@ -62,6 +62,10 @@
#define IPA_UC_FINISH_MAX 6
#define IPA_UC_WAIT_MIN_SLEEP 1000
#define IPA_UC_WAII_MAX_SLEEP 1200

#define IPA_MPM_MAX_RING_LEN 64
#define IPA_MAX_TETH_AGGR_BYTE_LIMIT 24
#define IPA_MPM_MAX_UC_THRESH 4
/*
 * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
 * IPA users still use sps_iovec size as FIFO element size.
@@ -1819,6 +1823,12 @@ struct ipa3_app_clock_vote {
 * @uc_wigig_ctx: WIGIG specific fields for uC interface
 * @ipa_num_pipes: The number of pipes used by IPA HW
 * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
 * @mpm_ring_size_dl_cache: To cache the dl ring size configured previously
 * @mpm_ring_size_dl: MHIP all DL pipe's ring size
 * @mpm_ring_size_ul_cache: To cache the ul ring size configured previously
 * @mpm_ring_size_ul: MHIP all UL pipe's ring size
 * @mpm_teth_aggr_size: MHIP teth aggregation byte size
 * @mpm_uc_thresh: uc threshold for enabling uc flow control
 * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
 * @apply_rg10_wa: Indicates whether to use register group 10 workaround
 * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround
@@ -1954,6 +1964,12 @@ struct ipa3_context {
	u32 wan_rx_ring_size;
	u32 lan_rx_ring_size;
	bool skip_uc_pipe_reset;
	int mpm_ring_size_dl_cache;
	int mpm_ring_size_dl;
	int mpm_ring_size_ul_cache;
	int mpm_ring_size_ul;
	int mpm_teth_aggr_size;
	int mpm_uc_thresh;
	unsigned long gsi_dev_hdl;
	u32 ee;
	bool apply_rg10_wa;
+78 −29
Original line number Diff line number Diff line
@@ -56,12 +56,9 @@

#define IPA_MPM_MAX_MHIP_CHAN 3

#define IPA_MPM_NUM_RING_DESC 6
#define IPA_MPM_RING_LEN IPA_MPM_NUM_RING_DESC

#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
#define IPA_MPM_MHI_HOST_DL_CHANNEL  5
#define TETH_AGGR_TIME_LIMIT 10000 /* 10ms */
#define TETH_AGGR_TIME_LIMIT 1000 /* 1ms */
#define TETH_AGGR_BYTE_LIMIT 24
#define TETH_AGGR_DL_BYTE_LIMIT 16
#define TRE_BUFF_SIZE 32768
@@ -357,17 +354,17 @@ struct ipa_mpm_clk_cnt_type {
struct producer_rings {
	struct mhi_p_desc *tr_va;
	struct mhi_p_desc *er_va;
	void *tr_buff_va[IPA_MPM_RING_LEN];
	void *tr_buff_va[IPA_MPM_MAX_RING_LEN];
	dma_addr_t tr_pa;
	dma_addr_t er_pa;
	dma_addr_t tr_buff_c_iova[IPA_MPM_RING_LEN];
	dma_addr_t tr_buff_c_iova[IPA_MPM_MAX_RING_LEN];
	/*
	 * The iova generated for AP CB,
	 * used only for dma_map_single to flush the cache.
	 */
	dma_addr_t ap_iova_er;
	dma_addr_t ap_iova_tr;
	dma_addr_t ap_iova_buff[IPA_MPM_RING_LEN];
	dma_addr_t ap_iova_buff[IPA_MPM_MAX_RING_LEN];
};

struct ipa_mpm_mhi_driver {
@@ -409,7 +406,6 @@ struct ipa_mpm_context {
};

#define IPA_MPM_DESC_SIZE (sizeof(struct mhi_p_desc))
#define IPA_MPM_RING_TOTAL_SIZE (IPA_MPM_RING_LEN * IPA_MPM_DESC_SIZE)
/* WA: Make the IPA_MPM_PAGE_SIZE from 16k (next power of ring size) to
 * 32k. This is to make sure IOMMU map happens for the same size
 * for all TR/ER and doorbells.
@@ -668,8 +664,14 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
		cb->next_addr = iova_p + size_p;
		iova = iova_p;
	} else {
		if (dir == DMA_TO_HIPA)
			iova = dma_map_single(ipa3_ctx->pdev, va_addr,
				ipa3_ctx->mpm_ring_size_dl *
				IPA_MPM_DESC_SIZE, dir);
		else
			iova = dma_map_single(ipa3_ctx->pdev, va_addr,
					IPA_MPM_RING_TOTAL_SIZE, dir);
				ipa3_ctx->mpm_ring_size_ul *
				IPA_MPM_DESC_SIZE, dir);

		if (dma_mapping_error(ipa3_ctx->pdev, iova)) {
			IPA_MPM_ERR("dma_map_single failure for entry\n");
@@ -744,8 +746,14 @@ static void ipa_mpm_smmu_unmap(dma_addr_t carved_iova, int sz, int dir,
		dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
			size_p, dir);
	} else {
		if (dir == DMA_TO_HIPA)
			dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
				ipa3_ctx->mpm_ring_size_dl *
				IPA_MPM_DESC_SIZE, dir);
		else
			dma_unmap_single(ipa3_ctx->pdev, ap_cb_iova,
			IPA_MPM_RING_TOTAL_SIZE, dir);
				ipa3_ctx->mpm_ring_size_ul *
				IPA_MPM_DESC_SIZE, dir);
	}
}

@@ -897,6 +905,7 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
	int i, k;
	int result;
	struct ipa3_ep_context *ep;
	int ring_size;

	if (mhip_client == IPA_CLIENT_MAX)
		goto fail_gen;
@@ -920,8 +929,17 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,

	IPA_MPM_FUNC_ENTRY();

	if (IPA_MPM_RING_TOTAL_SIZE > PAGE_SIZE) {
		IPA_MPM_ERR("Ring Size / allocation mismatch\n");
	if (IPA_CLIENT_IS_PROD(mhip_client) &&
		(ipa3_ctx->mpm_ring_size_dl *
			IPA_MPM_DESC_SIZE > PAGE_SIZE)) {
		IPA_MPM_ERR("Ring Size dl / allocation mismatch\n");
		ipa_assert();
	}

	if (IPA_CLIENT_IS_PROD(mhip_client) &&
		(ipa3_ctx->mpm_ring_size_ul *
			IPA_MPM_DESC_SIZE > PAGE_SIZE)) {
		IPA_MPM_ERR("Ring Size ul / allocation mismatch\n");
		ipa_assert();
	}

@@ -942,7 +960,12 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
		DMA_TO_HIPA : DMA_FROM_HIPA;

	/* allocate transfer ring elements */
	for (i = 1, k = 1; i < IPA_MPM_RING_LEN; i++, k++) {
	if (IPA_CLIENT_IS_PROD(mhip_client))
		ring_size = ipa3_ctx->mpm_ring_size_dl;
	else
		ring_size = ipa3_ctx->mpm_ring_size_ul;

	for (i = 1, k = 1; i < ring_size; i++, k++) {
		buff_va = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
		if (!buff_va)
			goto fail_buff_alloc;
@@ -1064,7 +1087,7 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
	gsi_params.evt_ring_params.intr = GSI_INTR_MSI;
	gsi_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.evt_ring_params.ring_len =
		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
		(ring_size) * GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.evt_ring_params.ring_base_vaddr = NULL;
	gsi_params.evt_ring_params.int_modt = 0;
	gsi_params.evt_ring_params.int_modc = 0;
@@ -1090,7 +1113,7 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
	/* chan_id is set in ipa3_request_gsi_channel() */
	gsi_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
	gsi_params.chan_params.ring_len =
		(IPA_MPM_RING_LEN) * GSI_EVT_RING_RE_SIZE_16B;
		(ring_size) * GSI_EVT_RING_RE_SIZE_16B;
	gsi_params.chan_params.ring_base_vaddr = NULL;
	gsi_params.chan_params.use_db_eng = GSI_CHAN_DIRECT_MODE;
	gsi_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
@@ -1190,6 +1213,7 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
	int i;
	int ipa_ep_idx;
	int result;
	int ring_size;

	IPA_MPM_FUNC_ENTRY();

@@ -1277,7 +1301,12 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
	}

	/* deallocate/Unmap transfer ring buffers */
	for (i = 1; i < IPA_MPM_RING_LEN; i++) {
	if (IPA_CLIENT_IS_PROD(mhip_client))
		ring_size = ipa3_ctx->mpm_ring_size_dl_cache;
	else
		ring_size = ipa3_ctx->mpm_ring_size_ul_cache;

	for (i = 1; i < ring_size; i++) {
		if (IPA_CLIENT_IS_PROD(mhip_client)) {
			ipa_mpm_smmu_unmap(
			(dma_addr_t)
@@ -2206,13 +2235,13 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			IPA_MPM_MHI_HOST_UL_CHANNEL;
		ch->chan_props.ch_ctx.erindex =
			mhi_dev->ul_event_id;
		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
		ch->chan_props.ch_ctx.rlen = (ipa3_ctx->mpm_ring_size_ul) *
			GSI_EVT_RING_RE_SIZE_16B;
		/* Store Event properties */
		ch->evt_props.ev_ctx.update_rp_modc = 1;
		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
		ch->evt_props.ev_ctx.ertype = 1;
		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
		ch->evt_props.ev_ctx.rlen = (ipa3_ctx->mpm_ring_size_ul) *
			GSI_EVT_RING_RE_SIZE_16B;
		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
		ch->evt_props.device_db =
@@ -2260,13 +2289,13 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		ch->chan_props.ch_ctx.chtype =
			IPA_MPM_MHI_HOST_DL_CHANNEL;
		ch->chan_props.ch_ctx.erindex = mhi_dev->dl_event_id;
		ch->chan_props.ch_ctx.rlen = (IPA_MPM_RING_LEN) *
		ch->chan_props.ch_ctx.rlen = (ipa3_ctx->mpm_ring_size_dl) *
			GSI_EVT_RING_RE_SIZE_16B;
		/* Store Event properties */
		ch->evt_props.ev_ctx.update_rp_modc = 0;
		ch->evt_props.ev_ctx.update_rp_intmodt = 0;
		ch->evt_props.ev_ctx.ertype = 1;
		ch->evt_props.ev_ctx.rlen = (IPA_MPM_RING_LEN) *
		ch->evt_props.ev_ctx.rlen = (ipa3_ctx->mpm_ring_size_dl) *
			GSI_EVT_RING_RE_SIZE_16B;
		ch->evt_props.ev_ctx.buff_size = TRE_BUFF_SIZE;
		ch->evt_props.device_db =
@@ -2338,7 +2367,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			(phys_addr_t)(ul_out_params.db_reg_phs_addr_lsb), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
			((ipa3_ctx->mpm_ring_size_ul - 1) *
			GSI_CHAN_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);

@@ -2371,7 +2401,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.er_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
			((ipa3_ctx->mpm_ring_size_ul + 1) *
			GSI_EVT_RING_RE_SIZE_16B);
		IPA_MPM_DBG("Host UL ER  DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);

@@ -2384,7 +2415,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			4);

		wp_addr = ipa_mpm_ctx->md[probe_id].ul_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
			((ipa3_ctx->mpm_ring_size_ul + 1) *
			GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);
		iounmap(db_addr);
@@ -2397,7 +2429,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN - 1) * GSI_CHAN_RE_SIZE_16B);
			((ipa3_ctx->mpm_ring_size_dl - 1) *
			GSI_CHAN_RE_SIZE_16B);

		IPA_MPM_DBG("Device DL TR  DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);
@@ -2418,7 +2451,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.er_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
			((ipa3_ctx->mpm_ring_size_dl + 1) *
			GSI_EVT_RING_RE_SIZE_16B);

		iowrite32(wp_addr, db_addr);
		IPA_MPM_DBG("Device  UL ER  DB = 0X%pK,wp_addr = 0X%0x",
@@ -2441,7 +2475,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);

		wp_addr = ipa_mpm_ctx->md[probe_id].dl_prod_ring.tr_pa +
			((IPA_MPM_RING_LEN + 1) * GSI_EVT_RING_RE_SIZE_16B);
			((ipa3_ctx->mpm_ring_size_dl + 1) *
			GSI_EVT_RING_RE_SIZE_16B);
		iowrite32(wp_addr, db_addr);
		IPA_MPM_DBG("Host  DL ER  DB = 0X%pK, wp_addr = 0X%0x",
			db_addr, wp_addr);
@@ -2535,7 +2570,9 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
		ep = &ipa3_ctx->ep[ipa_ep_idx];
		ret = ipa3_uc_send_enable_flow_control(ep->gsi_chan_hdl,
			IPA_MPM_RING_LEN / 4);
			ipa3_ctx->mpm_uc_thresh);
		IPA_MPM_DBG("Updated uc threshold to %d",
			ipa3_ctx->mpm_uc_thresh);
		if (ret) {
			IPA_MPM_ERR("Err %d flow control enable\n", ret);
			goto fail_flow_control;
@@ -2551,7 +2588,14 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		}
		IPA_MPM_DBG("Flow Control updated for %d", probe_id);
	}
	/* cache the current ring-size */
	ipa3_ctx->mpm_ring_size_ul_cache = ipa3_ctx->mpm_ring_size_ul;
	ipa3_ctx->mpm_ring_size_dl_cache = ipa3_ctx->mpm_ring_size_dl;
	IPA_MPM_DBG("Mpm ring size ul/dl %d / %d",
		ipa3_ctx->mpm_ring_size_ul, ipa3_ctx->mpm_ring_size_dl);

	IPA_MPM_FUNC_EXIT();

	return 0;

fail_gsi_setup:
@@ -2580,9 +2624,14 @@ static void ipa_mpm_init_mhip_channel_info(void)
		IPA_CLIENT_MHI_PRIME_TETH_CONS;
	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg =
		mhip_ul_teth_ep_cfg;
	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].ul_prod.ep_cfg.aggr.aggr_byte_limit
			= ipa3_ctx->mpm_teth_aggr_size;
	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_0].mhip_client =
		IPA_MPM_MHIP_TETH;

	IPA_MPM_DBG("Teth Aggregation byte limit =%d\n",
		ipa3_ctx->mpm_teth_aggr_size);

	/* IPA_MPM_MHIP_CH_ID_1 => MHIP RMNET PIPES */
	ipa_mpm_pipes[IPA_MPM_MHIP_CH_ID_1].dl_cons.ipa_client =
		IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+1 −0
Original line number Diff line number Diff line
@@ -2061,6 +2061,7 @@ int ipa3_qmi_set_aggr_info(enum ipa_aggr_enum_type_v01 aggr_enum_type)

	/* replace to right qmap format */
	aggr_req.aggr_info[1].aggr_type = aggr_enum_type;
	aggr_req.aggr_info[1].bytes_count = ipa3_ctx->mpm_teth_aggr_size;
	aggr_req.aggr_info[2].aggr_type = aggr_enum_type;
	aggr_req.aggr_info[3].aggr_type = aggr_enum_type;
	aggr_req.aggr_info[4].aggr_type = aggr_enum_type;