Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 943161fe authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mhi: core: add support for silent suspend and resume"

parents ece301b4 b5e62ef4
Loading
Loading
Loading
Loading
+101 −8
Original line number Diff line number Diff line
@@ -49,6 +49,7 @@ const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};
@@ -80,6 +81,95 @@ const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
	return mhi_pm_state_str[index];
}

/*
 * sysfs 'bus_vote' read handler: report the current bus vote count
 * held by this MHI device.
 */
static ssize_t bus_vote_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * scnprintf returns the number of bytes actually written (capped at
	 * PAGE_SIZE), which is what sysfs show handlers must return; snprintf
	 * may return a larger would-have-written count.
	 */
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&mhi_dev->bus_vote));
}

/*
 * sysfs 'bus_vote' write handler: "get" acquires a bus vote for this
 * device, "put" releases one previously acquired vote. Any other input
 * is rejected with -EINVAL.
 */
static ssize_t bus_vote_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	int ret;

	if (sysfs_streq(buf, "get")) {
		ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_BUS);
		if (ret)
			return ret;
	} else if (sysfs_streq(buf, "put")) {
		mhi_device_put(mhi_dev, MHI_VOTE_BUS);
	} else {
		return -EINVAL;
	}

	return count;
}
static DEVICE_ATTR_RW(bus_vote);

/*
 * sysfs 'device_vote' read handler: report the current device vote count
 * held by this MHI device.
 */
static ssize_t device_vote_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * scnprintf returns the number of bytes actually written (capped at
	 * PAGE_SIZE), which is what sysfs show handlers must return; snprintf
	 * may return a larger would-have-written count.
	 */
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 atomic_read(&mhi_dev->dev_vote));
}

/*
 * sysfs 'device_vote' write handler: "get" acquires a device vote for
 * this device, "put" releases one previously acquired vote. Any other
 * input is rejected with -EINVAL.
 */
static ssize_t device_vote_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	int ret;

	if (sysfs_streq(buf, "get")) {
		ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE);
		if (ret)
			return ret;
	} else if (sysfs_streq(buf, "put")) {
		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
	} else {
		return -EINVAL;
	}

	return count;
}
static DEVICE_ATTR_RW(device_vote);

/* sysfs attributes exposing the userspace bus/device voting interface */
static struct attribute *mhi_vote_attrs[] = {
	&dev_attr_bus_vote.attr,
	&dev_attr_device_vote.attr,
	NULL,
};

/* group so both vote nodes are created/removed together */
static const struct attribute_group mhi_vote_group = {
	.attrs = mhi_vote_attrs,
};

int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
{
	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
				  &mhi_vote_group);
}

/*
 * Tear down the vote sysfs nodes and release any votes userspace left
 * outstanding, so the device and bus are not kept awake after the
 * controller goes away.
 */
void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;

	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);

	/* relinquish any pending votes for device */
	while (atomic_read(&mhi_dev->dev_vote))
		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);

	/* remove pending votes for the bus */
	while (atomic_read(&mhi_dev->bus_vote))
		mhi_device_put(mhi_dev, MHI_VOTE_BUS);
}

/* MHI protocol require transfer ring to be aligned to ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
@@ -1425,7 +1515,7 @@ static int mhi_driver_probe(struct device *dev)
	int ret;

	/* bring device out of lpm */
	ret = mhi_device_get_sync(mhi_dev);
	ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE);
	if (ret)
		return ret;

@@ -1473,7 +1563,7 @@ static int mhi_driver_probe(struct device *dev)
		mhi_prepare_for_transfer(mhi_dev);

exit_probe:
	mhi_device_put(mhi_dev);
	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);

	return ret;
}
@@ -1548,11 +1638,13 @@ static int mhi_driver_remove(struct device *dev)
	if (mhi_cntrl->tsync_dev == mhi_dev)
		mhi_cntrl->tsync_dev = NULL;

	/* relinquish any pending votes */
	read_lock_bh(&mhi_cntrl->pm_lock);
	while (atomic_read(&mhi_dev->dev_wake))
		mhi_device_put(mhi_dev);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	/* relinquish any pending votes for device */
	while (atomic_read(&mhi_dev->dev_vote))
		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);

	/* remove pending votes for the bus */
	while (atomic_read(&mhi_dev->bus_vote))
		mhi_device_put(mhi_dev, MHI_VOTE_BUS);

	return 0;
}
@@ -1596,7 +1688,8 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
	mhi_dev->bus = mhi_cntrl->bus;
	mhi_dev->slot = mhi_cntrl->slot;
	mhi_dev->mtu = MHI_MAX_MTU;
	atomic_set(&mhi_dev->dev_wake, 0);
	atomic_set(&mhi_dev->dev_vote, 0);
	atomic_set(&mhi_dev->bus_vote, 0);

	return mhi_dev;
}
+2 −0
Original line number Diff line number Diff line
@@ -756,6 +756,8 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);

/* memory allocation methods */
static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
+11 −3
Original line number Diff line number Diff line
@@ -1402,8 +1402,16 @@ void mhi_ctrl_ev_task(unsigned long data)
	 * pm_state can change from reg access valid to no access while this
	 * thread is being executed.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * we may have a pending event but not allowed to
		 * process it since we are probably in a suspended state,
		 * trigger a resume.
		 */
		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
		return;
	}

	/* process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
@@ -1861,12 +1869,12 @@ int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
	struct mhi_controller *mhi_cntrl = m->private;

	seq_printf(m,
		   "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
		   "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u M3_Fast:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
		   to_mhi_pm_state_str(mhi_cntrl->pm_state),
		   TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		   TO_MHI_EXEC_STR(mhi_cntrl->ee),
		   mhi_cntrl->M0, mhi_cntrl->M2, mhi_cntrl->M3,
		   mhi_cntrl->wake_set,
		   mhi_cntrl->M3_FAST, mhi_cntrl->wake_set,
		   atomic_read(&mhi_cntrl->dev_wake),
		   atomic_read(&mhi_cntrl->alloc_size),
		   atomic_read(&mhi_cntrl->pending_pkts));
+261 −22
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
@@ -60,9 +61,9 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
	},
	{
		MHI_PM_M0,
		MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
		MHI_PM_FW_DL_ERR
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
@@ -328,7 +329,7 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
	}
	mhi_cntrl->M0++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
@@ -492,6 +493,9 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
	/* add supported devices */
	mhi_create_devices(mhi_cntrl);

	/* setup sysfs nodes for userspace votes */
	mhi_create_vote_sysfs(mhi_cntrl);

	ret = 0;

	read_lock_bh(&mhi_cntrl->pm_lock);
@@ -590,6 +594,9 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,

	MHI_LOG("Finish resetting channels\n");

	/* remove support for userspace votes */
	mhi_destroy_vote_sysfs(mhi_cntrl);

	MHI_LOG("Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->st_worker);
@@ -925,6 +932,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
	int ret;
	enum MHI_PM_STATE new_state;
	struct mhi_chan *itr, *tmp;
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;
@@ -932,9 +940,10 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* do a quick check to see if any pending data, then exit */
	/* do a quick check to see if any pending votes to keep us busy */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
	    atomic_read(&mhi_cntrl->pending_pkts) ||
	    atomic_read(&mhi_dev->bus_vote)) {
		MHI_VERB("Busy, aborting M3\n");
		return -EBUSY;
	}
@@ -961,9 +970,13 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)

	write_lock_irq(&mhi_cntrl->pm_lock);

	/* we're asserting wake so count would be @ least 1 */
	/*
	 * Check the votes once more to see if we should abort
	 * suspend. We're asserting wake so count would be at least 1
	 */
	if (atomic_read(&mhi_cntrl->dev_wake) > 1 ||
		atomic_read(&mhi_cntrl->pending_pkts)) {
	    atomic_read(&mhi_cntrl->pending_pkts) ||
	    atomic_read(&mhi_dev->bus_vote)) {
		MHI_VERB("Busy, aborting M3\n");
		write_unlock_irq(&mhi_cntrl->pm_lock);
		ret = -EBUSY;
@@ -1020,6 +1033,114 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL(mhi_pm_suspend);

/**
 * mhi_pm_fast_suspend - Faster suspend path where we transition host to
 * inactive state w/o suspending device. Useful for cases where we want apps to
 * go into power collapse but keep the physical link in active state.
 * @mhi_cntrl: MHI controller to fast-suspend
 * @notify_client: if true, send MHI_CB_LPM_ENTER to clients on lpm channels
 *
 * Returns 0 on success, -EBUSY if pending packets keep the link busy,
 * -EINVAL if pm is disabled, -EIO on an error pm state or a failed state
 * transition.
 */
int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
{
	int ret;
	enum MHI_PM_STATE new_state;
	struct mhi_chan *itr, *tmp;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* do a quick check to see if any pending votes to keep us busy */
	if (atomic_read(&mhi_cntrl->pending_pkts)) {
		MHI_VERB("Busy, aborting M3\n");
		return -EBUSY;
	}

	/* disable ctrl event processing so events can't race the transition */
	tasklet_disable(&mhi_cntrl->mhi_event->task);

	write_lock_irq(&mhi_cntrl->pm_lock);

	/*
	 * Check the votes once more, now under pm_lock, to see if we
	 * should abort the suspend.
	 */
	if (atomic_read(&mhi_cntrl->pending_pkts)) {
		MHI_VERB("Busy, aborting M3\n");
		ret = -EBUSY;
		goto error_suspend;
	}

	/* anytime after this, we will resume thru runtime pm framework */
	MHI_LOG("Allowing Fast M3 transition\n");

	/* save the current states so fast resume can restore them */
	mhi_cntrl->saved_pm_state = mhi_cntrl->pm_state;
	mhi_cntrl->saved_dev_state = mhi_cntrl->dev_state;

	/* If we're in M2, we need to switch back to M0 first */
	if (mhi_cntrl->pm_state == MHI_PM_M2) {
		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
		if (new_state != MHI_PM_M0) {
			MHI_ERR("Error set pm_state to:%s from pm_state:%s\n",
				to_mhi_pm_state_str(MHI_PM_M0),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
			ret = -EIO;
			goto error_suspend;
		}
	}

	/* walk the legal transition chain: M0 -> M3_ENTER -> M3 */
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		ret = -EIO;
		goto error_suspend;
	}

	/* set dev to M3_FAST and host to M3 */
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	if (new_state != MHI_PM_M3) {
		MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
			to_mhi_pm_state_str(MHI_PM_M3),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		ret = -EIO;
		goto error_suspend;
	}

	mhi_cntrl->dev_state = MHI_STATE_M3_FAST;
	mhi_cntrl->M3_FAST++;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* now safe to check ctrl event ring for events missed while disabled */
	tasklet_enable(&mhi_cntrl->mhi_event->task);
	mhi_msi_handlr(0, mhi_cntrl->mhi_event);

	if (!notify_client)
		return 0;

	/* notify any clients we enter lpm */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;

error_suspend:
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* check ctrl event ring for pending work */
	tasklet_enable(&mhi_cntrl->mhi_event->task);
	mhi_msi_handlr(0, mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL(mhi_pm_fast_suspend);

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	enum MHI_PM_STATE cur_state;
@@ -1086,6 +1207,80 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
	return 0;
}

/*
 * mhi_pm_fast_resume - Undo a fast suspend: restore the pm/dev states saved
 * by mhi_pm_fast_suspend() and manually poll every event ring, because the
 * device was never told the host suspended and MSIs may have been missed.
 * @notify_client: if true, send MHI_CB_LPM_EXIT to clients on lpm channels.
 * Returns 0 on success (or if pm is disabled), -EIO on an error pm state.
 */
int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
{
	struct mhi_chan *itr, *tmp;
	struct mhi_event *mhi_event;
	int i;

	MHI_LOG("Entered with pm_state:%s dev_state:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3");

	/* notify any clients we're about to exit lpm */
	if (notify_client) {
		list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
					 node) {
			mutex_lock(&itr->mutex);
			if (itr->mhi_dev)
				mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
			mutex_unlock(&itr->mutex);
		}
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	/* restore the states saved at fast suspend time */
	mhi_cntrl->pm_state = mhi_cntrl->saved_pm_state;
	mhi_cntrl->dev_state = mhi_cntrl->saved_dev_state;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	switch (mhi_cntrl->pm_state) {
	case MHI_PM_M0:
		mhi_pm_m0_transition(mhi_cntrl);
		/*
		 * fallthrough — NOTE(review): unmarked in the original;
		 * appears intentional (the M2 block below re-checks
		 * pm_state before toggling wake, so it is a no-op for M0),
		 * but confirm and add a formal fallthrough annotation.
		 */
	case MHI_PM_M2:
		read_lock_bh(&mhi_cntrl->pm_lock);
		/*
		 * we're doing a double check of pm_state because by the time
		 * we grab the pm_lock, the device may have already initiated
		 * an M0 on its own. If that's the case we should not be
		 * toggling device wake.
		 */
		if (mhi_cntrl->pm_state == MHI_PM_M2) {
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	/*
	 * In the fast suspend/resume case the device is not aware of the host
	 * transition to suspend state. So, the device could be triggering an
	 * interrupt while the host is not accepting MSI. We have to manually
	 * check each event ring upon resume.
	 */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_msi_handlr(0, mhi_event);
	}

	MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	return 0;
}

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;
@@ -1117,39 +1312,83 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
	return 0;
}

void mhi_device_get(struct mhi_device *mhi_dev)
void mhi_device_get(struct mhi_device *mhi_dev, int vote)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	atomic_inc(&mhi_dev->dev_wake);
	if (vote & MHI_VOTE_DEVICE) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_get(mhi_cntrl, true);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		atomic_inc(&mhi_dev->dev_vote);
	}

	if (vote & MHI_VOTE_BUS) {
		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
		atomic_inc(&mhi_dev->bus_vote);
	}
}
EXPORT_SYMBOL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	/*
	 * regardless of any vote we will bring device out lpm and assert
	 * device wake
	 */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		atomic_inc(&mhi_dev->dev_wake);

	if (ret)
		return ret;

	if (vote & MHI_VOTE_DEVICE) {
		atomic_inc(&mhi_dev->dev_vote);
	} else {
		/* client did not requested device vote so de-assert dev_wake */
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	if (vote & MHI_VOTE_BUS) {
		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
		atomic_inc(&mhi_dev->bus_vote);
	}

	return 0;
}
EXPORT_SYMBOL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
void mhi_device_put(struct mhi_device *mhi_dev, int vote)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	atomic_dec(&mhi_dev->dev_wake);
	if (vote & MHI_VOTE_DEVICE) {
		atomic_dec(&mhi_dev->dev_vote);
		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
			mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
			mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
		}
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	if (vote & MHI_VOTE_BUS) {
		atomic_dec(&mhi_dev->bus_vote);
		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);

		/*
		 * if counts reach 0, clients release all votes
		 * send idle cb to to attempt suspend
		 */
		if (!atomic_read(&mhi_dev->bus_vote))
			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
					     MHI_CB_IDLE);
	}
}
EXPORT_SYMBOL(mhi_device_put);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+2 −2
Original line number Diff line number Diff line
@@ -465,12 +465,12 @@ static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
			/* Request to enable LPM */
			MSG_VERB("Enable MHI LPM");
			mhi_netdev->wake--;
			mhi_device_put(mhi_dev);
			mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
		} else if (!ext_cmd.u.data && !mhi_netdev->wake) {
			/* Request to disable LPM */
			MSG_VERB("Disable MHI LPM");
			mhi_netdev->wake++;
			mhi_device_get(mhi_dev);
			mhi_device_get(mhi_dev, MHI_VOTE_DEVICE);
		}
		break;
	default:
Loading