Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 36cc86e8 authored by Rafael J. Wysocki
Browse files

Merge branches 'pm-runtime' and 'pm-sleep'

* pm-runtime:
  PM / Runtime: Update runtime_idle() documentation for return value meaning

* pm-sleep:
  PM / sleep: Correct whitespace errors in <linux/pm.h>
  PM: Add missing "freeze" state
  PM / Hibernate: Spelling s/anonymouns/anonymous/
  PM / Runtime: Add missing "it" in comment
  PM / suspend: Remove unnecessary !!
  PCI / PM: Resume runtime-suspended devices later during system suspend
  ACPI / PM: Resume runtime-suspended devices later during system suspend
  PM / sleep: Set pm_generic functions to NULL for !CONFIG_PM_SLEEP
  PM: fix typo in comment
  PM / hibernate: use name_to_dev_t to parse resume
  PM / wakeup: Include appropriate header file in kernel/power/wakelock.c
  PM / sleep: Move prototype declaration to header file kernel/power/power.h
  PM / sleep: Asynchronous threads for suspend_late
  PM / sleep: Asynchronous threads for suspend_noirq
  PM / sleep: Asynchronous threads for resume_early
  PM / sleep: Asynchronous threads for resume_noirq
  PM / sleep: Two flags for async suspend_noirq and suspend_late
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -12,8 +12,9 @@ Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
		The /sys/power/state file controls the system power state.
		Reading from this file returns what states are supported,
		which is hard-coded to 'standby' (Power-On Suspend), 'mem'
		(Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
		which is hard-coded to 'freeze' (Low-Power Idle), 'standby'
		(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
		(Suspend-to-Disk).

		Writing to this file one of these strings causes the system to
		transition into that state. Please see the file
+38 −3
Original line number Diff line number Diff line
@@ -901,14 +901,29 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
/* prepare() callback for devices in the ACPI PM domain. */
int acpi_subsys_prepare(struct device *dev)
{
	/*
	 * Devices having power.ignore_children set may still be necessary for
	 * suspending their children in the next phase of device suspend, so
	 * resume such devices from runtime suspend here.  Other
	 * runtime-suspended devices are resumed later, in their own system
	 * suspend callbacks (see acpi_subsys_suspend()).
	 */
	if (dev->power.ignore_children)
		pm_runtime_resume(dev);

	return pm_generic_prepare(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);

/**
 * acpi_subsys_suspend - Run the device driver's suspend callback.
 * @dev: Device to handle.
 *
 * Follow PCI and resume devices suspended at run time before running their
 * system suspend callbacks.
 */
int acpi_subsys_suspend(struct device *dev)
{
	int ret;

	/* Runtime-resume the device before invoking the generic suspend. */
	pm_runtime_resume(dev);
	ret = pm_generic_suspend(dev);

	return ret;
}

/**
 * acpi_subsys_suspend_late - Suspend device using ACPI.
 * @dev: Device to suspend.
@@ -937,6 +952,23 @@ int acpi_subsys_resume_early(struct device *dev)
	return ret ? ret : pm_generic_resume_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);

/**
 * acpi_subsys_freeze - Run the device driver's freeze callback.
 * @dev: Device to handle.
 */
int acpi_subsys_freeze(struct device *dev)
{
	int ret;

	/*
	 * acpi_subsys_prepare() used to runtime-resume all devices and some
	 * drivers may depend on that, so keep doing it here.  Ideally,
	 * runtime-suspended devices would not be touched at all during
	 * freeze/thaw transitions.
	 */
	pm_runtime_resume(dev);
	ret = pm_generic_freeze(dev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

static struct dev_pm_domain acpi_general_pm_domain = {
@@ -947,8 +979,11 @@ static struct dev_pm_domain acpi_general_pm_domain = {
#endif
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_subsys_suspend_late,
		.resume_early = acpi_subsys_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_suspend,
		.poweroff_late = acpi_subsys_suspend_late,
		.restore_early = acpi_subsys_resume_early,
#endif
+225 −50
Original line number Diff line number Diff line
@@ -91,6 +91,8 @@ void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
@@ -467,7 +469,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
@@ -479,6 +481,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

/*
 * Whether @dev is to be suspended/resumed asynchronously: the device must
 * request it, the global pm_async switch must be on, and PM tracing must be
 * disabled.
 */
static bool is_async(struct device *dev)
{
	if (!dev->power.async_suspend)
		return false;
	if (!pm_async_enabled)
		return false;
	return !pm_trace_is_enabled();
}

/* Async worker: run the "noirq" resume callback for one device. */
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error = device_resume_noirq(dev, pm_transition, true);

	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	/* Drop the reference taken when the work was scheduled. */
	put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
@@ -514,29 +541,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 */
static void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
@@ -549,7 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
@@ -561,6 +607,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
	if (dev->power.syscore)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +632,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

/* Async worker: run the "early" resume callback for one device. */
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error = device_resume_early(dev, pm_transition, true);

	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	/* Drop the reference taken when the work was scheduled. */
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}

		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
}

@@ -732,12 +815,6 @@ static void async_resume(void *data, async_cookie_t cookie)
	put_device(dev);
}

/*
 * True when @dev should use async suspend/resume: the device requests it,
 * the global pm_async switch is on, and PM tracing is disabled.
 */
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
@@ -913,13 +990,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		return 0;
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
@@ -940,7 +1028,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

/* Async worker: run the "noirq" suspend callback for one device. */
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error = __device_suspend_noirq(dev, pm_transition, true);

	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	/* Drop the reference taken when the work was scheduled. */
	put_device(dev);
}

/*
 * Suspend one device in the "noirq" phase, asynchronously when both the
 * global pm_async switch and the device's async_suspend flag allow it.
 */
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (!pm_async_enabled || !dev->power.async_suspend)
		return __device_suspend_noirq(dev, pm_transition, false);

	/* Reference is dropped by async_suspend_noirq(). */
	get_device(dev);
	async_schedule(async_suspend_noirq, dev);
	return 0;
}

/**
@@ -958,19 +1080,20 @@ static int dpm_suspend_noirq(pm_message_t state)
	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);
		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
@@ -979,16 +1102,21 @@ static int dpm_suspend_noirq(pm_message_t state)
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
		if (async_error)
			break;
	}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	else
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	return error;
}

@@ -999,15 +1127,26 @@ static int dpm_suspend_noirq(pm_message_t state)
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	__pm_runtime_disable(dev, false);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		return 0;
		goto Complete;

	dpm_wait_for_children(dev, async);

	if (dev->pm_domain) {
		info = "late power domain ";
@@ -1028,7 +1167,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	return error;
}

/* Async worker: run the "late" suspend callback for one device. */
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;
	int error = __device_suspend_late(dev, pm_transition, true);

	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	/* Drop the reference taken when the work was scheduled. */
	put_device(dev);
}

/*
 * Suspend one device in the "late" phase, asynchronously when both the
 * global pm_async switch and the device's async_suspend flag allow it.
 */
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (!pm_async_enabled || !dev->power.async_suspend)
		return __device_suspend_late(dev, pm_transition, false);

	/* Reference is dropped by async_suspend_late(). */
	get_device(dev);
	async_schedule(async_suspend_late, dev);
	return 0;
}

/**
@@ -1041,19 +1214,20 @@ static int dpm_suspend_late(pm_message_t state)
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);
		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
@@ -1062,17 +1236,18 @@ static int dpm_suspend_late(pm_message_t state)
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
		if (async_error)
			break;
	}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
	async_synchronize_full();
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	else
	} else {
		dpm_show_time(starttime, state, "late");

	}
	return error;
}

+1 −1
Original line number Diff line number Diff line
@@ -1131,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
+25 −8
Original line number Diff line number Diff line
@@ -616,14 +616,10 @@ static int pci_pm_prepare(struct device *dev)
	int error = 0;

	/*
	 * PCI devices suspended at run time need to be resumed at this
	 * point, because in general it is necessary to reconfigure them for
	 * system suspend.  Namely, if the device is supposed to wake up the
	 * system from the sleep state, we may need to reconfigure it for this
	 * purpose.  In turn, if the device is not supposed to wake up the
	 * system from the sleep state, we'll have to prevent it from signaling
	 * wake-up.
	 * Devices having power.ignore_children set may still be necessary for
	 * suspending their children in the next phase of device suspend.
	 */
	if (dev->power.ignore_children)
		pm_runtime_resume(dev);

	if (drv && drv->pm && drv->pm->prepare)
@@ -654,6 +650,16 @@ static int pci_pm_suspend(struct device *dev)
		goto Fixup;
	}

	/*
	 * PCI devices suspended at run time need to be resumed at this point,
	 * because in general it is necessary to reconfigure them for system
	 * suspend.  Namely, if the device is supposed to wake up the system
	 * from the sleep state, we may need to reconfigure it for this purpose.
	 * In turn, if the device is not supposed to wake up the system from the
	 * sleep state, we'll have to prevent it from signaling wake-up.
	 */
	pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
@@ -808,6 +814,14 @@ static int pci_pm_freeze(struct device *dev)
		return 0;
	}

	/*
	 * This used to be done in pci_pm_prepare() for all devices and some
	 * drivers may depend on it, so do it here.  Ideally, runtime-suspended
	 * devices should not be touched during freeze/thaw transitions,
	 * however.
	 */
	pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->freeze) {
		int error;
@@ -915,6 +929,9 @@ static int pci_pm_poweroff(struct device *dev)
		goto Fixup;
	}

	/* The reason to do that is the same as in pci_pm_suspend(). */
	pm_runtime_resume(dev);

	pci_dev->state_saved = false;
	if (pm->poweroff) {
		int error;
Loading