
Commit 4dc65e12 authored by qctecmdr, committed by Gerrit - the friendly Code Review server

Merge "mhi: core: add mhi_device_get_sync_atomic() to wait until M0"

parents 4892e9e2 c653a9c1
+2 −0
@@ -752,6 +752,8 @@ struct mhi_bus {
 
 /* default MHI timeout */
 #define MHI_TIMEOUT_MS (1000)
+#define MHI_FORCE_WAKE_DELAY_US (100)
+
 extern struct mhi_bus mhi_bus;
 
 struct mhi_controller *find_mhi_controller_by_name(const char *name);
+83 −25
@@ -404,9 +404,19 @@ void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
 	enum MHI_PM_STATE state;
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
+	/* Just check if we are racing with device_wake assertion */
+	if (atomic_read(&mhi_cntrl->dev_wake))
+		MHI_VERB("M2 transition request post dev_wake:%d\n",
+			 atomic_read(&mhi_cntrl->dev_wake));
+
 	/* if it fails, means we transition to M3 */
 	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
-	if (state == MHI_PM_M2) {
+	if (state != MHI_PM_M2) {
+		/* Nothing to be done, handle M3 transition later */
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		return;
+	}
+
 	MHI_VERB("Entered M2 State\n");
 	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
 	mhi_cntrl->dev_state = MHI_STATE_M2;
@@ -426,13 +436,10 @@ void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
 		mhi_cntrl->wake_get(mhi_cntrl, true);
 		mhi_cntrl->wake_put(mhi_cntrl, true);
 		read_unlock_bh(&mhi_cntrl->pm_lock);
-		} else {
-			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
-					     MHI_CB_IDLE);
-		}
-	} else {
-		write_unlock_irq(&mhi_cntrl->pm_lock);
+		return;
 	}
 
+	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_IDLE);
 }
 
 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
@@ -1580,6 +1587,57 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote)
 }
 EXPORT_SYMBOL(mhi_device_get_sync);
 
+int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return -EIO;
+	}
+
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	atomic_inc(&mhi_dev->dev_vote);
+	pm_wakeup_hard_event(&mhi_cntrl->mhi_dev->dev);
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+
+	/* Return if client doesn't want us to wait */
+	if (!timeout_us) {
+		if (mhi_cntrl->pm_state != MHI_PM_M0)
+			MHI_ERR("Return without waiting for M0\n");
+
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return 0;
+	}
+
+	while (mhi_cntrl->pm_state != MHI_PM_M0 &&
+	       !MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) &&
+	       timeout_us > 0) {
+		udelay(MHI_FORCE_WAKE_DELAY_US);
+		timeout_us -= MHI_FORCE_WAKE_DELAY_US;
+	}
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || timeout_us <= 0) {
+		MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		atomic_dec(&mhi_dev->dev_vote);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return -ETIMEDOUT;
+	}
+
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_device_get_sync_atomic);
+
 void mhi_device_put(struct mhi_device *mhi_dev, int vote)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+24 −0
@@ -610,6 +610,30 @@ void mhi_device_get(struct mhi_device *mhi_dev, int vote);
  */
 int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote);
 
+/**
+ * mhi_device_get_sync_atomic - Asserts device_wake and moves device to M0
+ * @mhi_dev: Device associated with the channels
+ * @timeout_us: timeout, in micro-seconds
+ *
+ * The device_wake is asserted to keep device in M0 or bring it to M0.
+ * If device is not in M0 state, then this function will wait for device to
+ * move to M0, until @timeout_us elapses.
+ * However, if device's M1 state-change event races with this function
+ * then there is a possibility of device moving from M0 to M2 and back
+ * to M0. That can't be avoided as host must transition device from M1 to M2
+ * as per the spec.
+ * Clients can ignore that transition after this function returns as the device
+ * is expected to immediately move from M2 to M0 as wake is asserted and
+ * wouldn't enter low power state.
+ *
+ * Returns:
+ * 0 if operation was successful (however, M0 -> M2 -> M0 is possible later) as
+ * mentioned above.
+ * -ETIMEDOUT if device failed to move to M0 before @timeout_us elapsed
+ * -EIO if the MHI state is one of the ERROR states.
+ */
+int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us);
+
 /**
  * mhi_device_put - re-enable low power modes
  * @mhi_dev: Device associated with the channels
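
For reference, a minimal client-side sketch of how the new API might be used follows. It is not part of this commit: the helper name, the 1000 us budget, and the MHI_VOTE_DEVICE flag used for the matching mhi_device_put() are assumptions based on this tree's mhi.h, not something the patch itself defines.

/* Hypothetical client sketch (not part of this commit). */
#include <linux/errno.h>
#include <linux/mhi.h>

/*
 * Assumed context: the caller may be atomic (e.g. holding a spinlock or in
 * IRQ context), so the sleeping mhi_device_get_sync() cannot be used here.
 */
static int client_force_m0(struct mhi_device *mhi_dev)
{
	int ret;

	/* Busy-wait up to 1000 us (arbitrary budget) for the device to reach M0 */
	ret = mhi_device_get_sync_atomic(mhi_dev, 1000);
	if (ret)
		return ret;	/* -EIO or -ETIMEDOUT; vote already dropped internally */

	/* ... perform the time-critical doorbell/register access here ... */

	/*
	 * Drop the device vote taken above. MHI_VOTE_DEVICE is assumed to be
	 * the matching vote flag defined in this tree's mhi.h.
	 */
	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);

	return 0;
}

Note that on -ETIMEDOUT the function has already released its wake and device votes, so the client must not call mhi_device_put() in that path.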