Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b1c4897 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: ipa: ADPL over USB offload on MHI Prime channels"

parents f1b5eee5 85e65075
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -212,6 +212,9 @@ int ipa3_active_clients_log_print_table(char *buf, int size)
	cnt += scnprintf(buf + cnt, size - cnt,
			"\nTotal active clients count: %d\n",
			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));

	cnt += ipa_mpm_panic_handler(buf + cnt, size - cnt);

	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
		flags);

+6 −1
Original line number Diff line number Diff line
@@ -269,7 +269,7 @@ enum {

#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100

#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 4096

#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
@@ -2935,6 +2935,7 @@ int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot);
int ipa3_is_mhip_offload_enabled(void);
int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
	enum ipa_client_type dst_pipe);
int ipa_mpm_panic_handler(char *buf, int size);
#else
static inline int ipa_mpm_mhip_xdci_pipe_enable(
	enum ipa_usb_teth_prot prot)
@@ -2964,6 +2965,10 @@ static inline int ipa_mpm_reset_dma_mode(enum ipa_client_type src_pipe,
{
	return 0;
}
/*
 * Stub for builds without CONFIG_IPA3_MHI_PRIME_MANAGER (see #else branch):
 * contributes nothing to the active-clients log and reports 0 bytes written,
 * matching the other inline stubs in this section.
 */
static inline int ipa_mpm_panic_handler(char *buf, int size)
{
	return 0;
}

#endif /* CONFIG_IPA3_MHI_PRIME_MANAGER */

+223 −154
Original line number Diff line number Diff line
@@ -66,9 +66,7 @@

#define IPA_MPM_MHI_HOST_UL_CHANNEL 4
#define IPA_MPM_MHI_HOST_DL_CHANNEL  5
#define DEFAULT_AGGR_TIME_LIMIT 1000 /* 1ms */
#define DEFAULT_AGGR_PKT_LIMIT 0
#define TETH_AGGR_TIME_LIMIT 10000
#define TETH_AGGR_TIME_LIMIT 10000 /* 10ms */
#define TETH_AGGR_BYTE_LIMIT 24
#define TETH_AGGR_DL_BYTE_LIMIT 16
#define TRE_BUFF_SIZE 32768
@@ -142,11 +140,16 @@ enum mhip_smmu_domain_type {

/* each pair of UL/DL channels are defined below */
static const struct mhi_device_id mhi_driver_match_table[] = {
	{ .chan = "IP_HW_MHIP_0" }, // for rmnet pipes
	{ .chan = "IP_HW_MHIP_1" }, // for MHIP teth pipes - rndis/wifi
	{ .chan = "IP_HW_ADPL" }, // DPL/ODL DL pipe
	{ .chan = "IP_HW_MHIP_0" }, /* for rndis/Wifi teth pipes */
	{ .chan = "IP_HW_MHIP_1" }, /* for MHIP rmnet */
	{ .chan = "IP_HW_ADPL" }, /* ADPL/ODL DL pipe */
};

/*
 * Human-readable names for each MHIP channel id, indexed by probe_id
 * (0..IPA_MPM_MHIP_CH_ID_MAX-1). Used as the per-client tag for the IPA
 * active-clients log (IPA_ACTIVE_CLIENTS_INC/DEC_SPECIAL) so clock votes
 * are attributed to the owning channel instead of a single "ipa_mpm" tag.
 */
static const char *ipa_mpm_mhip_chan_str[IPA_MPM_MHIP_CH_ID_MAX] = {
	__stringify(IPA_MPM_MHIP_TETH),
	__stringify(IPA_MPM_MHIP_USB_RMNET),
	__stringify(IPA_MPM_MHIP_USB_DPL),
};
/*
 * MHI PRIME GSI Descriptor format that Host IPA uses.
 */
@@ -280,7 +283,7 @@ static struct ipa_ep_cfg mhip_ul_rmnet_ep_cfg = {
static struct ipa_ep_cfg mhip_dl_dpl_ep_cfg = {
	.mode = {
		.mode = IPA_DMA,
		.dst = IPA_CLIENT_USB_CONS,
		.dst = IPA_CLIENT_USB_DPL_CONS,
	},
};

@@ -341,6 +344,11 @@ enum ipa_mpm_mhip_chan {
	IPA_MPM_MHIP_CHAN_BOTH,
};

/*
 * Per-channel (per probe_id) clock vote counters, kept in
 * ipa_mpm_mhi_driver.clk_cnt. Incremented/decremented by
 * ipa_mpm_vote_unvote_pcie_clk() and ipa_mpm_vote_unvote_ipa_clk() and
 * dumped by ipa_mpm_panic_handler() for debugging vote imbalances.
 */
struct ipa_mpm_clk_cnt_type {
	atomic_t pcie_clk_cnt;	/* outstanding PCIe (MHI device) votes */
	atomic_t ipa_clk_cnt;	/* outstanding IPA clock votes */
};

struct producer_rings {
	struct mhi_p_desc *tr_va;
	struct mhi_p_desc *er_va;
@@ -369,15 +377,16 @@ struct ipa_mpm_mhi_driver {
	bool init_complete;
	struct mutex lpm_mutex;
	bool in_lpm;
	struct ipa_mpm_clk_cnt_type clk_cnt;
};

struct ipa_mpm_context {
	struct ipa_mpm_dev_info dev_info;
	struct ipa_mpm_mhi_driver md[IPA_MPM_MAX_MHIP_CHAN];
	struct mutex mutex;
	atomic_t ipa_clk_ref_cnt;
	atomic_t pcie_clk_ref_cnt;
	atomic_t probe_cnt;
	atomic_t pcie_clk_total_cnt;
	atomic_t ipa_clk_total_cnt;
	struct device *parent_pdev;
	struct ipa_smmu_cb_ctx carved_smmu_cb;
	struct device *mhi_parent_dev;
@@ -403,12 +412,13 @@ static void ipa_mpm_change_teth_state(int probe_id,
static void ipa_mpm_change_gsi_state(int probe_id,
	enum ipa_mpm_mhip_chan mhip_chan,
	enum ipa_mpm_gsi_state next_state);
static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
	enum ipa_mpm_start_stop_type start);
static int ipa_mpm_probe(struct platform_device *pdev);
static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id);
static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote);
static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id);
static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
	enum ipa_mpm_mhip_chan mhip_chan,
	int probe_id,
@@ -1036,15 +1046,9 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
		return;
	}

	/* Release channel */
	if (mhi_idx == IPA_MPM_MHIP_CH_ID_2) {
		/* Note: DPL not supported yet */
		IPA_MPM_ERR("DPL not supported yet. returning\n");
		return;
	}

	ipa3_set_reset_client_prod_pipe_delay(true,
					IPA_CLIENT_USB_PROD);
	/* For the uplink channels, enable HOLB. */
	if (IPA_CLIENT_IS_CONS(mhip_client))
		ipa3_disable_data_path(ipa_ep_idx);

	/* Release channel */
	result = ipa3_stop_gsi_channel(ipa_ep_idx);
@@ -1237,9 +1241,7 @@ static int __ipa_mpm_configure_mhi_device(struct ipa_mpm_channel *ch,
		IPA_MPM_ERR("mhi_device_configure failed\n");
		return -EINVAL;
	}

	IPA_MPM_FUNC_EXIT();

	return 0;
}

@@ -1250,21 +1252,26 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
	IPA_MPM_FUNC_ENTRY();

	get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
	if (mhip_idx == IPA_MPM_MHIP_CH_ID_2) {
		IPA_MPM_ERR("DPL - return\n");
		return;
	}

	if (mhip_idx != IPA_MPM_MHIP_CH_ID_2) {
		/* For DPL, stop only DL channel */
		ipa_mpm_start_stop_ul_mhip_data_path(mhip_idx, STOP);
		ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
	}

	ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);

	mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
	if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
		ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
		/* while in modem shutdown scenarios such as SSR, no explicit
		 * PCIe vote is needed.
		 */
		ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
	}
	mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
	ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
	ipa_mpm_ctx->md[mhip_idx].init_complete = false;
	IPA_MPM_FUNC_EXIT();
}

@@ -1291,24 +1298,35 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
		IPA_MPM_ERR("MHI not initialized yet\n");
		return 0;
	}

	IPA_MPM_ERR("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
		vote, probe_id,
		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));

	if (vote == CLK_ON) {
		if ((atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 0)) {
		result = mhi_device_get_sync(
			ipa_mpm_ctx->md[probe_id].mhi_dev);
		if (result) {
				IPA_MPM_ERR("mhi_sync_get failed %d\n",
					result);
			IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
				result, probe_id);
			return result;
		}
			IPA_MPM_DBG("PCIE clock now ON\n");
		}
		atomic_inc(&ipa_mpm_ctx->pcie_clk_ref_cnt);

		IPA_MPM_DBG("probe_id %d PCIE clock now ON\n", probe_id);
		atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
		atomic_inc(&ipa_mpm_ctx->pcie_clk_total_cnt);
	} else {
		if ((atomic_read(&ipa_mpm_ctx->pcie_clk_ref_cnt) == 1)) {
			mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev);
			IPA_MPM_DBG("PCIE clock off ON\n");
		if ((atomic_read(
			&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt)
								== 0)) {
			IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
				probe_id);
			ipa_assert();
		}
		atomic_dec(&ipa_mpm_ctx->pcie_clk_ref_cnt);
		mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev);
		IPA_MPM_DBG("probe_id %d PCIE clock off\n", probe_id);
		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
		atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
	}

	return result;
@@ -1317,23 +1335,33 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
/*
 * Turning on/OFF IPA Clock is done only once- for all clients
 */
static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote)
static void ipa_mpm_vote_unvote_ipa_clk(enum ipa_mpm_clk_vote_type vote,
	int probe_id)
{
	if (vote > CLK_OFF)
		return;

	IPA_MPM_ERR("IPA clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
		vote, probe_id,
		atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt));

	if (vote == CLK_ON) {
		if ((!atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt))) {
			IPA_ACTIVE_CLIENTS_INC_SPECIAL("ipa_mpm");
			IPA_MPM_DBG("IPA clock now ON\n");
		}
		atomic_inc(&ipa_mpm_ctx->ipa_clk_ref_cnt);
		IPA_ACTIVE_CLIENTS_INC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
		IPA_MPM_DBG("IPA clock now ON for probe_id %d\n", probe_id);
		atomic_inc(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
		atomic_inc(&ipa_mpm_ctx->ipa_clk_total_cnt);
	} else {
		if ((atomic_read(&ipa_mpm_ctx->ipa_clk_ref_cnt) == 1)) {
			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("ipa_mpm");
			IPA_MPM_DBG("IPA clock now OFF\n");
		if ((atomic_read
			(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt)
								== 0)) {
			IPA_MPM_DBG("probe_id %d IPA clock count < 0\n",
				probe_id);
			ipa_assert();
		}
		atomic_dec(&ipa_mpm_ctx->ipa_clk_ref_cnt);
		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(ipa_mpm_mhip_chan_str[probe_id]);
		IPA_MPM_DBG("probe_id %d IPA clock off\n", probe_id);
		atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.ipa_clk_cnt);
		atomic_dec(&ipa_mpm_ctx->ipa_clk_total_cnt);
	}
}

@@ -1517,7 +1545,6 @@ int ipa_mpm_notify_wan_state(void)

	/* Start UL MHIP channel for offloading the tethering connection */
	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);

	if (ret) {
		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
		return ret;
@@ -1529,7 +1556,7 @@ int ipa_mpm_notify_wan_state(void)
	case MHIP_STATUS_SUCCESS:
	case MHIP_STATUS_NO_OP:
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
		ret = ipa_mpm_start_stop_mhip_data_path(probe_id, START);
		ret = ipa_mpm_start_stop_ul_mhip_data_path(probe_id, START);

		if (ret) {
			IPA_MPM_ERR("Couldnt start UL GSI channel");
@@ -1645,7 +1672,7 @@ static void ipa_mpm_read_channel(enum ipa_client_type chan)
		IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
}

static int ipa_mpm_start_stop_mhip_data_path(int probe_id,
static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
	enum ipa_mpm_start_stop_type start)
{
	int ipa_ep_idx;
@@ -1741,12 +1768,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,

	IPA_MPM_DBG("Received probe for id=%d\n", probe_id);

	if (probe_id == IPA_MPM_MHIP_CH_ID_2) {
		/* NOTE :: DPL not supported yet , remove later */
		IPA_MPM_DBG("DPL not supported yet - returning for DPL..\n");
		return 0;
	}

	get_ipa3_client(probe_id, &ul_prod, &dl_cons);

	/* Vote for IPA clock for first time in initialization seq.
@@ -1762,7 +1783,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,

	ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
	mutex_lock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
	ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
	ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
	ipa_mpm_ctx->md[probe_id].in_lpm = false;
	mutex_unlock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
	IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
@@ -1781,7 +1802,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
	 * IPA Device PROD TRE -> IPA HOST CONS EV
	 * IPA Device PROD EV ->  IPA HOST CONS TRE
	 */
	if (probe_id != IPA_MPM_MHIP_CH_ID_2) {
	if (ul_prod != IPA_CLIENT_MAX) {
		/* store UL properties */
		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
@@ -1808,7 +1828,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			ipa_mpm_ctx->dev_info.erdb_base +
			ch->chan_props.ch_ctx.erindex * 8;
	}
	}

	if (dl_cons != IPA_CLIENT_MAX) {
		/* store DL channel properties */
		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
@@ -1836,7 +1856,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			ch->chan_props.ch_ctx.erindex * 8;
	}
	/* connect Host GSI pipes with MHI' protocol */
	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
	if (ul_prod != IPA_CLIENT_MAX)  {
		ret = ipa_mpm_connect_mhip_gsi_pipe(ul_prod,
			probe_id, &ul_out_params);
		if (ret) {
@@ -1845,13 +1865,17 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			goto fail_gsi_setup;
		}
	}
	ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons, probe_id, &dl_out_params);

	if (dl_cons != IPA_CLIENT_MAX) {
		ret = ipa_mpm_connect_mhip_gsi_pipe(dl_cons,
			probe_id, &dl_out_params);
		if (ret) {
			IPA_MPM_ERR("connecting MPM client = %d failed\n",
				dl_cons);
			goto fail_gsi_setup;
		}
	if (probe_id != IPA_MPM_MHIP_CH_ID_2)  {
	}

	if (ul_prod != IPA_CLIENT_MAX) {
		ch = &ipa_mpm_ctx->md[probe_id].ul_prod;
		ch->evt_props.ev_ctx.update_rp_addr =
@@ -1871,7 +1895,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
			goto fail_smmu;
		}
	}
	}

	if (dl_cons != IPA_CLIENT_MAX) {
		ch = &ipa_mpm_ctx->md[probe_id].dl_cons;
@@ -2038,15 +2061,17 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
	 */
	switch (ipa_mpm_ctx->md[probe_id].teth_state) {
	case IPA_MPM_TETH_INIT:
		if (ul_prod != IPA_CLIENT_MAX) {
			/* No teth started yet, disable UL channel */
			ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
						probe_id, STOP);

			/* Disable data path */
		if (ipa_mpm_start_stop_mhip_data_path(probe_id, STOP)) {
			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
				STOP)) {
				IPA_MPM_ERR("MHIP Enable data path failed\n");
				goto fail_start_channel;
			}
		}
		ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
		break;
	case IPA_MPM_TETH_INPROGRESS:
@@ -2055,12 +2080,16 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);

		/* Enable data path */
		if (ipa_mpm_start_stop_mhip_data_path(probe_id, START)) {
		if (ul_prod != IPA_CLIENT_MAX) {
			if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
				START)) {
				IPA_MPM_ERR("MHIP Enable data path failed\n");
				goto fail_start_channel;
			}
		}

		/* Lyft the delay for rmnet USB prod pipe */
		if (probe_id == IPA_MPM_MHIP_CH_ID_1)
			/* Lift the delay for rmnet USB prod pipe */
			ipa3_set_reset_client_prod_pipe_delay(false,
							IPA_CLIENT_USB_PROD);
		break;
@@ -2071,6 +2100,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
	}

	atomic_inc(&ipa_mpm_ctx->probe_cnt);
	ipa_mpm_ctx->md[probe_id].init_complete = true;
	IPA_MPM_FUNC_EXIT();
	return 0;

@@ -2080,7 +2110,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
	if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
		IPA_MPM_DBG("SMMU failed\n");
	ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
	ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, probe_id);
	ipa_assert();
	return ret;
}
@@ -2138,6 +2168,8 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
		return;
	}

	IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);

	ipa_mpm_mhip_shutdown(mhip_idx);
	atomic_dec(&ipa_mpm_ctx->probe_cnt);

@@ -2146,7 +2178,13 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
		ipa_mpm_ctx->mhi_parent_dev = NULL;
		ipa_mpm_ctx->carved_smmu_cb.next_addr =
			ipa_mpm_ctx->carved_smmu_cb.va_start;
		atomic_set(&ipa_mpm_ctx->pcie_clk_ref_cnt, 0);
		atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);
		for (mhip_idx = 0;
			mhip_idx < IPA_MPM_MHIP_CH_ID_MAX; mhip_idx++) {
			atomic_set(
				&ipa_mpm_ctx->md[mhip_idx].clk_cnt.pcie_clk_cnt,
				0);
		}
	}

	IPA_MPM_FUNC_EXIT();
@@ -2179,7 +2217,7 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
				IPA_MPM_MHIP_CHAN_DL,
				mhip_idx, STOP);
			IPA_MPM_DBG("status = %d\n", status);
			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF);
			ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
			ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
		} else {
			IPA_MPM_DBG("Already in lpm\n");
@@ -2187,7 +2225,7 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
		break;
	case MHI_CB_LPM_EXIT:
		if (ipa_mpm_ctx->md[mhip_idx].in_lpm) {
			ipa_mpm_vote_unvote_ipa_clk(CLK_ON);
			ipa_mpm_vote_unvote_ipa_clk(CLK_ON, mhip_idx);
			status = ipa_mpm_start_stop_mhip_chan(
				IPA_MPM_MHIP_CHAN_DL,
				mhip_idx, START);
@@ -2300,37 +2338,38 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)

	ipa_mpm_ctx->md[probe_id].mhip_client = mhip_client;

	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
	if (ret) {
		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
		return ret;
	}

	switch (mhip_client) {
	case IPA_MPM_MHIP_USB_RMNET:
		ipa_mpm_set_dma_mode(IPA_CLIENT_USB_PROD,
			IPA_CLIENT_MHI_PRIME_RMNET_CONS);
		break;
	case IPA_MPM_MHIP_TETH:
		IPA_MPM_DBG("Teth for prot %d\n", mhip_client);
		return 0;
	case IPA_MPM_MHIP_USB_DPL:
		IPA_MPM_DBG("Teth connecting for prot %d\n", mhip_client);
		IPA_MPM_DBG("connecting DPL prot %d\n", mhip_client);
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
		return 0;
	default:
		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
		ret = 0;
		break;
	}

	/* Start UL MHIP channel for offloading the tethering connection */
	ret = ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);

	if (ret) {
		IPA_MPM_ERR("Error cloking on PCIe clk, err = %d\n", ret);
		return ret;
	}

	if (mhip_client != IPA_MPM_MHIP_USB_DPL)
		/* Start UL MHIP channel for offloading teth connection */
		status = ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
							probe_id, START);

	switch (status) {
	case MHIP_STATUS_SUCCESS:
	case MHIP_STATUS_NO_OP:
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
		ipa_mpm_start_stop_mhip_data_path(probe_id, START);
		ipa_mpm_start_stop_ul_mhip_data_path(probe_id, START);
		/* Lift the delay for rmnet USB prod pipe */
		ipa3_set_reset_client_prod_pipe_delay(false,
			IPA_CLIENT_USB_PROD);
@@ -2389,7 +2428,7 @@ int ipa_mpm_mhip_ul_data_stop(enum ipa_usb_teth_prot xdci_teth_prot)
	IPA_MPM_DBG("Map xdci prot %d to mhip_client = %d probe_id = %d\n",
		xdci_teth_prot, mhip_client, probe_id);

	ret = ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
	ret = ipa_mpm_start_stop_ul_mhip_data_path(probe_id, STOP);

	if (ret)
		IPA_MPM_ERR("Error stopping UL path, err = %d\n", ret);
@@ -2424,7 +2463,7 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
		return 0;
	}

	IPA_MPM_DBG("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
	IPA_MPM_ERR("xdci disconnect prot %d mhip_client = %d probe_id = %d\n",
			xdci_teth_prot, mhip_client, probe_id);

	switch (mhip_client) {
@@ -2433,8 +2472,14 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
		IPA_MPM_DBG("Teth Disconnecting for prot %d\n", mhip_client);
		break;
	case IPA_MPM_MHIP_USB_DPL:
		IPA_MPM_DBG("Teth Disconnecting for DPL, return\n");
		return 0;
		IPA_MPM_DBG("Teth Disconnecting for DPL\n");
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
		ret = ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
		if (ret)
			IPA_MPM_ERR("Error cloking off PCIe clk, err = %d\n",
				ret);
		ipa_mpm_ctx->md[probe_id].mhip_client = IPA_MPM_MHIP_NONE;
		return ret;
	default:
		IPA_MPM_ERR("mhip_client = %d not supported\n", mhip_client);
		return 0;
@@ -2448,7 +2493,7 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
	case MHIP_STATUS_NO_OP:
	case MHIP_STATUS_EP_NOT_READY:
		ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
		ipa_mpm_start_stop_mhip_data_path(probe_id, STOP);
		ipa_mpm_start_stop_ul_mhip_data_path(probe_id, STOP);
		break;
	case MHIP_STATUS_FAIL:
	case MHIP_STATUS_BAD_STATE:
@@ -2516,7 +2561,7 @@ static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
	cb->va_size = carved_iova_ap_mapping[1];
	cb->va_end = cb->va_start + cb->va_size;

	if (cb->va_start >= ap_cb->va_start || cb->va_end >= ap_cb->va_start) {
	if (cb->va_end >= ap_cb->va_start) {
		IPA_MPM_ERR("MPM iommu and AP overlap addr 0x%lx\n",
				cb->va_start);
		ipa_assert();
@@ -2596,12 +2641,14 @@ static int ipa_mpm_probe(struct platform_device *pdev)
		goto fail_probe;
	}

	atomic_set(&ipa_mpm_ctx->ipa_clk_ref_cnt, 0);
	atomic_set(&ipa_mpm_ctx->pcie_clk_ref_cnt, 0);
	atomic_set(&ipa_mpm_ctx->ipa_clk_total_cnt, 0);
	atomic_set(&ipa_mpm_ctx->pcie_clk_total_cnt, 0);

	for (idx = 0; idx < IPA_MPM_MHIP_CH_ID_MAX; idx++) {
		ipa_mpm_ctx->md[idx].ul_prod.gsi_state = GSI_INIT;
		ipa_mpm_ctx->md[idx].dl_cons.gsi_state = GSI_INIT;
		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.ipa_clk_cnt, 0);
		atomic_set(&ipa_mpm_ctx->md[idx].clk_cnt.pcie_clk_cnt, 0);
	}

	ret = mhi_driver_register(&mhi_driver);
@@ -2671,6 +2718,28 @@ int ipa3_is_mhip_offload_enabled(void)
		return 1;
}

/*
 * Dump the MHIP clock-vote bookkeeping into @buf for the active-clients
 * panic log: the file-wide PCIe and IPA vote totals, then the per-channel
 * (per probe_id) vote counts.
 *
 * @buf:  destination buffer
 * @size: capacity of @buf in bytes
 *
 * Returns the number of characters written (scnprintf semantics: never
 * more than @size - 1, always NUL-terminated).
 */
int ipa_mpm_panic_handler(char *buf, int size)
{
	int i;
	int cnt = 0;

	cnt = scnprintf(buf, size,
			"\n---- MHIP Active Clients Table ----\n");
	cnt += scnprintf(buf + cnt, size - cnt,
			"Total PCIe active clients count: %d\n",
			atomic_read(&ipa_mpm_ctx->pcie_clk_total_cnt));
	cnt += scnprintf(buf + cnt, size - cnt,
			"Total IPA active clients count: %d\n",
			atomic_read(&ipa_mpm_ctx->ipa_clk_total_cnt));

	for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
		/*
		 * Bug fix: the format string was missing the %d for the
		 * third argument, so the pcie vote count was passed but
		 * never printed ("pcie vote cnt" appeared with no value).
		 */
		cnt += scnprintf(buf + cnt, size - cnt,
			"client id: %d ipa vote cnt: %d pcie vote cnt: %d\n", i,
			atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.ipa_clk_cnt),
			atomic_read(&ipa_mpm_ctx->md[i].clk_cnt.pcie_clk_cnt));
	}
	return cnt;
}
late_initcall(ipa_mpm_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Proxy Manager Driver");