Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2000f19d authored by Sujeev Dias, committed by Gerrit - the friendly Code Review server
Browse files

mhi: core: add support to ring doorbell (DB) in any active MHI state



Newer MHI based modems support ringing transfer channel DB
register while in M2 state. Host is not required to ring WAKE DB
to trigger M2 exit. This change takes advantage of it by not
ringing WAKE DB to exit M2 during data transfer.

CRs-Fixed: 2418347
Change-Id: I5deedcada0140b6439d8418124566c6a6b6c7410
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
parent 1d824f85
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -278,6 +278,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->alloc_size, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
@@ -1083,6 +1084,11 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl,
	if (ret)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* by default host allowed to ring DB both M0 and M2 state */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (of_property_read_bool(of_node, "mhi,m2-no-db-access"))
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
+2 −1
Original line number Diff line number Diff line
@@ -457,7 +457,8 @@ enum MHI_PM_STATE {
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & MHI_PM_M0)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
					mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
						MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+36 −86
Original line number Diff line number Diff line
@@ -355,7 +355,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;
	bool assert_wake = false;
	int ret;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
@@ -376,16 +375,8 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
	}

	/*
	 * For UL channels always assert WAKE until work is done,
	 * For DL channels only assert if MHI is in a LPM
	 */
	if (mhi_chan->dir == DMA_TO_DEVICE ||
	    (mhi_chan->dir == DMA_FROM_DEVICE &&
	     mhi_cntrl->pm_state != MHI_PM_M0)) {
		assert_wake = true;
		mhi_cntrl->wake_get(mhi_cntrl, false);
	}
	/* toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	/* generate the tre */
	buf_info = buf_ring->wp;
@@ -412,23 +403,20 @@ int mhi_queue_skb(struct mhi_device *mhi_dev,
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake)
		mhi_cntrl->wake_put(mhi_cntrl, true);

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

map_error:
	if (assert_wake)
		mhi_cntrl->wake_put(mhi_cntrl, false);

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
@@ -446,7 +434,6 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;
	bool assert_wake = false;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;
@@ -466,16 +453,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
	}

	/*
	 * For UL channels always assert WAKE until work is done,
	 * For DL channels only assert if MHI is in a LPM
	 */
	if (mhi_chan->dir == DMA_TO_DEVICE ||
	    (mhi_chan->dir == DMA_FROM_DEVICE &&
	     mhi_cntrl->pm_state != MHI_PM_M0)) {
		assert_wake = true;
		mhi_cntrl->wake_get(mhi_cntrl, false);
	}
	/* toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	/* generate the tre */
	buf_info = buf_ring->wp;
@@ -510,15 +489,15 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
	}

	if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake)
		mhi_cntrl->wake_put(mhi_cntrl, true);

	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
@@ -581,7 +560,6 @@ int mhi_queue_buf(struct mhi_device *mhi_dev,
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ring *tre_ring;
	unsigned long flags;
	bool assert_wake = false;
	int ret;

	/*
@@ -612,18 +590,13 @@ int mhi_queue_buf(struct mhi_device *mhi_dev,
		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
	}

	/*
	 * For UL channels always assert WAKE until work is done,
	 * For DL channels only assert if MHI is in a LPM
	 */
	if (mhi_chan->dir == DMA_TO_DEVICE ||
	    (mhi_chan->dir == DMA_FROM_DEVICE &&
	     mhi_cntrl->pm_state != MHI_PM_M0)) {
		assert_wake = true;
		mhi_cntrl->wake_get(mhi_cntrl, false);
	}
	/* toggle wake to exit out of M2 */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
		unsigned long flags;

		read_lock_irqsave(&mhi_chan->lock, flags);
@@ -631,9 +604,6 @@ int mhi_queue_buf(struct mhi_device *mhi_dev,
		read_unlock_irqrestore(&mhi_chan->lock, flags);
	}

	if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake)
		mhi_cntrl->wake_put(mhi_cntrl, true);

	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return 0;
@@ -960,11 +930,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE) {
				read_lock_bh(&mhi_cntrl->pm_lock);
				mhi_cntrl->wake_put(mhi_cntrl, false);
				read_unlock_bh(&mhi_cntrl->pm_lock);
			}
			if (mhi_chan->dir == DMA_TO_DEVICE)
				atomic_dec(&mhi_cntrl->pending_pkts);

			/*
			 * recycle the buffer if buffer is pre-allocated,
@@ -993,7 +960,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) {
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
@@ -1233,7 +1200,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

@@ -1286,7 +1253,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
		count++;
	}
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

@@ -1364,7 +1331,7 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

@@ -1556,7 +1523,7 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);
@@ -1606,7 +1573,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
		goto error_pm_state;
	}

	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
@@ -1614,7 +1581,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
	if (ret) {
		MHI_ERR("Failed to send start chan cmd\n");
		goto error_send_cmd;
		goto error_pm_state;
	}

	ret = wait_for_completion_timeout(&mhi_chan->completion,
@@ -1623,7 +1590,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
		MHI_ERR("Failed to receive cmd completion for chan:%d\n",
			mhi_chan->chan);
		ret = -EIO;
		goto error_send_cmd;
		goto error_pm_state;
	}

	write_lock_irq(&mhi_chan->lock);
@@ -1657,7 +1624,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) {
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
@@ -1665,21 +1632,12 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mutex_unlock(&mhi_chan->mutex);

	MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan);

	return 0;

error_send_cmd:
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
@@ -1690,11 +1648,6 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
	return ret;

error_pre_alloc:

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

@@ -1750,7 +1703,8 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE)
			mhi_cntrl->wake_put(mhi_cntrl, false);
			atomic_dec(&mhi_cntrl->pending_pkts);

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
		mhi_del_ring_element(mhi_cntrl, buf_ring);
@@ -1842,7 +1796,7 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
		goto error_invalid_state;
	}

	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
@@ -1850,7 +1804,7 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
	if (ret) {
		MHI_ERR("Failed to send reset chan cmd\n");
		goto error_completion;
		goto error_invalid_state;
	}

	/* even if it fails we will still reset */
@@ -1859,11 +1813,6 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
		MHI_ERR("Failed to receive cmd completion, still resetting\n");

error_completion:
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

error_invalid_state:
	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
@@ -1878,14 +1827,15 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
	struct mhi_controller *mhi_cntrl = m->private;

	seq_printf(m,
		   "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u\n",
		   "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
		   to_mhi_pm_state_str(mhi_cntrl->pm_state),
		   TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		   TO_MHI_EXEC_STR(mhi_cntrl->ee),
		   mhi_cntrl->M0, mhi_cntrl->M2, mhi_cntrl->M3,
		   mhi_cntrl->wake_set,
		   atomic_read(&mhi_cntrl->dev_wake),
		   atomic_read(&mhi_cntrl->alloc_size));
		   atomic_read(&mhi_cntrl->alloc_size),
		   atomic_read(&mhi_cntrl->pending_pkts));
	return 0;
}

+28 −8
Original line number Diff line number Diff line
@@ -160,6 +160,17 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
	}
}

/*
 * No-op wake toggle for controllers that may ring DB registers while in
 * M2 state (db_access includes MHI_PM_M2); installed so callers can invoke
 * wake_toggle unconditionally without asserting WAKE.
 */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

/*
 * Pulse the device WAKE vote (get then immediately put) to force an exit
 * from M2 on controllers that cannot ring DB registers in M2.
 * NOTE(review): the second argument to wake_get/wake_put appears to be a
 * force/override flag — confirm against mhi_assert_dev_wake's signature.
 */
static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* set device wake */
void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
@@ -381,12 +392,15 @@ void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
		wake_up_all(&mhi_cntrl->state_event);

		/* transfer pending, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->dev_wake))) {
			MHI_VERB("Exiting M2 Immediately, count:%d\n",
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			MHI_VERB(
				 "Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
				 atomic_read(&mhi_cntrl->pending_pkts),
				 atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, false);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
@@ -462,7 +476,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);

@@ -584,6 +598,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
	mutex_lock(&mhi_cntrl->pm_mutex);

	MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0");
	MHI_ASSERT(atomic_read(&mhi_cntrl->pending_pkts), "pending_pkts != 0");

	/* reset the ev rings and cmd rings */
	MHI_LOG("Resetting EV CTXT and CMD CTXT\n");
@@ -761,10 +776,13 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
	if (mhi_cntrl->msi_allocated < mhi_cntrl->total_ev_rings)
		return -EINVAL;

	/* set to default wake if not set */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put) {
	/* set to default wake if any one is not set */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
@@ -915,7 +933,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
		return -EIO;

	/* do a quick check to see if any pending data, then exit */
	if (atomic_read(&mhi_cntrl->dev_wake)) {
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		MHI_VERB("Busy, aborting M3\n");
		return -EBUSY;
	}
@@ -943,7 +962,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
	write_lock_irq(&mhi_cntrl->pm_lock);

	/* we're asserting wake so count would be @ least 1 */
	if (atomic_read(&mhi_cntrl->dev_wake) > 1) {
	if (atomic_read(&mhi_cntrl->dev_wake) > 1 ||
		atomic_read(&mhi_cntrl->pending_pkts)) {
		MHI_VERB("Busy, aborting M3\n");
		write_unlock_irq(&mhi_cntrl->pm_lock);
		ret = -EBUSY;
+3 −0
Original line number Diff line number Diff line
@@ -238,11 +238,13 @@ struct mhi_controller {
	bool pre_init;
	rwlock_t pm_lock;
	u32 pm_state;
	u32 db_access; /* db access only on these states */
	enum mhi_ee ee;
	enum mhi_dev_state dev_state;
	bool wake_set;
	atomic_t dev_wake;
	atomic_t alloc_size;
	atomic_t pending_pkts;
	struct list_head transition_list;
	spinlock_t transition_lock;
	spinlock_t wlock;
@@ -261,6 +263,7 @@ struct mhi_controller {
	int (*link_status)(struct mhi_controller *, void *);
	void (*wake_get)(struct mhi_controller *, bool);
	void (*wake_put)(struct mhi_controller *, bool);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
	int (*runtime_get)(struct mhi_controller *, void *);
	void (*runtime_put)(struct mhi_controller *, void *);
	u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);