Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db2b6851 authored by Andrei Danaila, committed by Matt Wagantall
Browse files

msm: mhi: Enable M2 exit through CLKREQ assertion



Enable M2 power state transition exit through
CLKREQ GPIO assertion due to MHI doorbell write.

CRs-Fixed: 733370
Change-Id: I27b425ee305fc9c044812a8b15c76970987a5dae
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
parent 9d176440
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -395,6 +395,7 @@ struct mhi_flags {
	atomic_t pending_ssr;
	atomic_t pending_powerup;
	int stop_threads;
	atomic_t device_wake;
	u32 ssr;
};

@@ -453,8 +454,10 @@ struct mhi_device_ctxt {
	u32 outbound_evmod_rate;
	struct mhi_counters counters;
	struct mhi_flags flags;
	u32 device_wake_asserted;

	rwlock_t xfer_lock;
	atomic_t m2_transition;
	struct hrtimer m1_timer;
	ktime_t m1_timeout;
	ktime_t ul_acc_tmr_timeout;
+1 −8
Original line number Diff line number Diff line
@@ -115,6 +115,7 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
	rwlock_init(&mhi_dev_ctxt->xfer_lock);
	mutex_init(&mhi_dev_ctxt->mhi_link_state);
	mutex_init(&mhi_dev_ctxt->pm_lock);
	atomic_set(&mhi_dev_ctxt->m2_transition, 0);
	return MHI_STATUS_SUCCESS;

db_write_lock_free:
@@ -368,14 +369,6 @@ static enum MHI_STATUS mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,

/*
 * Set up the M1 inactivity timer used to drive M0 -> M1 power
 * transitions.  The timer is armed elsewhere; here we only configure
 * it: monotonic clock, relative expiry, MHI_M1_ENTRY_DELAY_MS period.
 */
static enum MHI_STATUS mhi_init_timers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	hrtimer_init(&mhi_dev_ctxt->m1_timer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	mhi_dev_ctxt->m1_timer.function = mhi_initiate_m1;
	/* Convert ms to ns for ktime */
	mhi_dev_ctxt->m1_timeout =
			ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L);
	mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
		"Starting M1 timer\n");
	return MHI_STATUS_SUCCESS;
}

+4 −1
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#define MHI_IPC_LOG_PAGES (100)
#define MHI_LOG_SIZE 0x1000
#define MHI_LINK_STABILITY_WAIT_MS 100
#define MHI_DEVICE_WAKE_DBOUNCE_TIMEOUT_MS 10
#define MHI_MAX_LINK_RETRIES 9
#define DT_WAIT_RETRIES 30
#define MHI_MAX_SUSPEND_RETRIES 1000
@@ -54,10 +55,12 @@
#define MHI_DATA_SEG_WINDOW_START_ADDR 0x0ULL
#define MHI_DATA_SEG_WINDOW_END_ADDR 0x3E800000ULL

#define MHI_M1_ENTRY_DELAY_MS 100
#define MHI_M2_DEBOUNCE_TMR_MS 10
#define MHI_XFER_DB_INTERVAL 8
#define MHI_EV_DB_INTERVAL 32

#define MHI_DEV_WAKE_DB 127

#define MHI_HANDLE_MAGIC 0x12344321
/* PCIe Device Info */

+59 −26
Original line number Diff line number Diff line
@@ -497,6 +497,28 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
				chan, db_value);
	}
}
/*
 * Check whether the device is currently in the M2 low power state or
 * transitioning through it.
 *
 * Returns MHI_STATUS_CHAN_NOT_READY when the caller must defer its
 * doorbell write (device asleep or mid-transition); in the asleep case
 * DEVICE_WAKE is asserted here so the device starts returning to M0.
 * Returns MHI_STATUS_SUCCESS when it is safe to proceed.
 */
enum MHI_STATUS mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_VERBOSE, "state = %d\n", mhi_dev_ctxt->mhi_state);
	if (mhi_dev_ctxt->mhi_state != MHI_STATE_M2)
		return MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
		(atomic_read(&mhi_dev_ctxt->m2_transition)));
	if (atomic_read(&mhi_dev_ctxt->m2_transition)) {
		/* Transition in flight: do not touch the device yet */
		mhi_log(MHI_MSG_INFO, "m2_transition flag is set\n");
		ret_val = MHI_STATUS_CHAN_NOT_READY;
	} else if (mhi_dev_ctxt->flags.link_up) {
		/* Settled in M2 with a live link: start waking the device */
		mhi_assert_device_wake(mhi_dev_ctxt);
		ret_val = MHI_STATUS_CHAN_NOT_READY;
	}
	return ret_val;
}

static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt
							*mhi_dev_ctxt,
@@ -516,11 +538,12 @@ static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt
			"Queued outbound pkt. Pending Acks %d\n",
		atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
	}
	if (likely((((
	    (mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
	    (mhi_dev_ctxt->mhi_state == MHI_STATE_M1)) &&
	ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
	if (likely(((ret_val == MHI_STATUS_SUCCESS) &&
	    (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
	      (mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
	    (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
	    (!mhi_dev_ctxt->flags.pending_M3)))) {
	    (!mhi_dev_ctxt->flags.pending_M3))) {
		if (likely(type == MHI_RING_TYPE_XFER_RING)) {
			spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
					   flags);
@@ -574,14 +597,6 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
	chan = client_handle->chan;
	pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);

	/* Bump up the vote for pending data */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);

	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	if (mhi_dev_ctxt->flags.link_up)
		mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
	pkt_loc->data_tx_pkt.buffer_ptr = buf;
	pkt_loc->type.info = mhi_flags;
@@ -602,19 +617,20 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
	ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
				(void *)&pkt_loc);
	if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
		mhi_log(MHI_MSG_CRITICAL,
		mhi_log(MHI_MSG_VERBOSE,
				"Failed to insert trb in xfer ring\n");
		goto error;
	}

	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
	if (unlikely(MHI_STATUS_SUCCESS != ret_val))
		mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

error:
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	return ret_val;
@@ -645,18 +661,18 @@ enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
			mhi_dev_ctxt->dev_exec_env,
			chan, cmd);

	mhi_assert_device_wake(mhi_dev_ctxt);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	/*
	 * If there is a cmd pending a device confirmation,
	 * do not send anymore for this channel
	 */
	if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) {
		mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan);
		ret_val = MHI_STATUS_CMD_PENDING;
		goto error_invalid;
	}

	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	from_state =
		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state;

@@ -1341,21 +1357,38 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)

/*
 * Assert DEVICE_WAKE to (re)wake the device from a low power state.
 *
 * NOTE(review): this span renders a diff hunk, so both the old GPIO
 * mechanism and the new doorbell mechanism appear below — confirm
 * against the applied tree which lines survived the patch.
 *
 * Always returns 0.
 */
int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_log(MHI_MSG_VERBOSE, "GPIO %d\n",
			mhi_dev_ctxt->dev_props->device_wake_gpio);
	/* Drive the DEVICE_WAKE GPIO high */
	gpio_direction_output(mhi_dev_ctxt->dev_props->device_wake_gpio, 1);
	/* Doorbell path: only usable once the DB region is mapped and the
	 * PCIe link is up. */
	if ((mhi_dev_ctxt->channel_db_addr) &&
	       (mhi_dev_ctxt->flags.link_up)) {
			mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
				mhi_dev_ctxt->enable_lpm);
			/* Record the wake request before ringing the DB so
			 * mhi_deassert_device_wake() sees a consistent flag */
			atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
			/* Ring the dedicated DEVICE_WAKE doorbell; per the
			 * commit message this triggers M2 exit via CLKREQ */
			mhi_write_db(mhi_dev_ctxt,
				     mhi_dev_ctxt->channel_db_addr,
				     MHI_DEV_WAKE_DB, 1);
			mhi_dev_ctxt->device_wake_asserted = 1;
	} else {
		mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
	}
	return 0;
}

/*
 * Deassert DEVICE_WAKE, allowing the device to re-enter a low power
 * state when low power mode (enable_lpm) is permitted.
 *
 * NOTE(review): this span renders a diff hunk, so both the old GPIO
 * deassert path and the new doorbell deassert path appear below —
 * confirm against the applied tree which lines survived the patch.
 *
 * Always returns 0.
 */
inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_log(MHI_MSG_VERBOSE, "GPIO %d\n",
			mhi_dev_ctxt->dev_props->device_wake_gpio);
	/* Drop the DEVICE_WAKE GPIO only when low power mode is allowed */
	if (mhi_dev_ctxt->enable_lpm)
		gpio_direction_output(
			mhi_dev_ctxt->dev_props->device_wake_gpio, 0);
	else
		mhi_log(MHI_MSG_VERBOSE, "LPM Enabled\n");
	/* Doorbell path: clear the wake only if we previously asserted it
	 * (flags.device_wake), the DB region is mapped, and the link is up */
	if ((mhi_dev_ctxt->enable_lpm) &&
	    (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
	    (mhi_dev_ctxt->channel_db_addr != NULL) &&
	    (mhi_dev_ctxt->flags.link_up)) {
		mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
		atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
		/* Ring the DEVICE_WAKE doorbell with 0 to release the wake */
		mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
				MHI_DEV_WAKE_DB, 0);
		mhi_dev_ctxt->device_wake_asserted = 0;
	} else {
		mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
				mhi_dev_ctxt->enable_lpm,
				atomic_read(&mhi_dev_ctxt->flags.device_wake),
				mhi_dev_ctxt->flags.link_up);
	}
	return 0;
}

+0 −40
Original line number Diff line number Diff line
@@ -113,46 +113,6 @@ exit:
	return r;
}

/*
 * hrtimer callback that attempts an M0 -> M1 power state transition.
 *
 * Fires every MHI_M1_ENTRY_DELAY_MS while the device is active; enters
 * M1 (and drops DEVICE_WAKE) only when the link is fully idle.  This
 * function is removed by the surrounding patch in favor of the
 * doorbell/CLKREQ wake mechanism.
 *
 * Returns HRTIMER_RESTART while the device remains in an active state
 * (M0/M1/READY), HRTIMER_NORESTART otherwise.
 */
enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer)
{
	int ret_val = 0;
	unsigned long flags;
	ktime_t curr_time, timer_inc;
	/* Recover the owning device context from the embedded timer */
	struct mhi_device_ctxt *mhi_dev_ctxt = container_of(timer,
						struct mhi_device_ctxt,
						m1_timer);
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);

	/*
	 * We will allow M1 if no data is pending, the current
	 * state is M0 and no M3 transition is pending
	 */
	if ((0 == atomic_read(&mhi_dev_ctxt->flags.data_pending)) &&
			(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state ||
			 MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) &&
			(0 == mhi_dev_ctxt->flags.pending_M3) &&
			mhi_dev_ctxt->flags.mhi_initialized &&
			(0 == atomic_read(
			&mhi_dev_ctxt->counters.outbound_acks))) {
		/* Idle: enter M1 and release DEVICE_WAKE so the device
		 * may power down further */
		mhi_dev_ctxt->mhi_state = MHI_STATE_M1;
		ret_val = mhi_deassert_device_wake(mhi_dev_ctxt);
		mhi_dev_ctxt->counters.m0_m1++;
		if (ret_val)
			mhi_log(MHI_MSG_ERROR,
				"Could not set DEVICE WAKE GPIO LOW\n");
	}
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	/* NOTE(review): mhi_state is re-read here without xfer_lock held;
	 * a concurrent state change could race this check — confirm this
	 * was intended (timer restart decision is best-effort). */
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
	    mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
	    mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
		curr_time = ktime_get();
		/* Re-arm the periodic timer (ms -> ns) */
		timer_inc = ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L);
		hrtimer_forward(timer, curr_time, timer_inc);
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

int mhi_init_pm_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &mhi_attribute_group);
Loading