
Commit 9e5e7910 authored by Chuansheng Liu, committed by Rafael J. Wysocki

PM / sleep: Asynchronous threads for resume_early



In analogy with commits 5af84b82 and 97df8c12, using
asynchronous threads can improve the overall resume_early
time significantly.

This patch is for the resume_early phase.

Signed-off-by: Chuansheng Liu <chuansheng.liu@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 76569faa
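
The patch builds on the kernel's generic async framework (linux/async.h) together with per-device completions: devices that opt into async resume have their early-resume callback scheduled up front with async_schedule(), dpm_wait() makes a child wait on its parent's completion, and async_synchronize_full() keeps the phase open until every async callback has returned. The fragment below is only a minimal sketch of that general pattern, not code from the patch; demo_item, demo_async_fn and demo_resume_all are hypothetical names used for illustration.

#include <linux/async.h>
#include <linux/completion.h>
#include <linux/list.h>

/* Hypothetical per-item state standing in for struct device's power fields. */
struct demo_item {
	struct list_head entry;
	struct completion done;		/* dependants block on this */
};

/* Callback signature expected by the async framework. */
static void demo_async_fn(void *data, async_cookie_t cookie)
{
	struct demo_item *item = data;

	/* ... per-item resume work would run here ... */

	complete_all(&item->done);	/* let waiters proceed */
}

static void demo_resume_all(struct list_head *items)
{
	struct demo_item *item;

	/*
	 * Schedule everything up front so async work is not serialized
	 * behind items that are still handled synchronously.
	 */
	list_for_each_entry(item, items, entry) {
		reinit_completion(&item->done);
		async_schedule(demo_async_fn, item);
	}

	/* The phase is only complete once all scheduled callbacks are. */
	async_synchronize_full();
}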
drivers/base/power/main.c  +44 −11
@@ -595,7 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -610,6 +610,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	if (!dev->power.is_late_suspended)
 		goto Out;
 
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "early power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -636,38 +638,69 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	TRACE_RESUME(error);
 
 	pm_runtime_enable(dev);
+	complete_all(&dev->power.completion);
 	return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_early(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.next);
-		int error;
+	pm_transition = state;
 
+	/*
+	 * Advanced the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_early, dev);
+		}
+	}
+
+	while (!list_empty(&dpm_late_early_list)) {
+		dev = to_device(dpm_late_early_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_early(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_early++;
-			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " early", error);
-		}
+		if (!is_async(dev)) {
+			int error;
 
+			error = device_resume_early(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_early++;
+				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " early", error);
+			}
+		}
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "early");
 }