Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 852b7fa2 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki
Browse files

Merge branches 'pm-core', 'pm-qos' and 'pm-avs'

* pm-core:
  PM / core: Fix bug in the error handling of async suspend
  PM / wakeirq: Fix dedicated wakeirq for drivers not using autosuspend
  PM / Runtime: Defer resuming of the device in pm_runtime_force_resume()
  PM / Runtime: Don't allow to suspend a device with an active child
  net: smsc911x: Synchronize the runtime PM status during system suspend
  PM / Runtime: Convert pm_runtime_set_suspended() to return an int
  PM / Runtime: Clarify comment in rpm_resume() when resuming the parent
  PM / Runtime: Remove the exported function pm_children_suspended()

* pm-qos:
  PM / QoS: Export dev_pm_qos_update_user_latency_tolerance
  PM / QoS: Fix writing 'auto' to pm_qos_latency_tolerance_us
  PM / QoS: Improve sysfs pm_qos_latency_tolerance validation

* pm-avs:
  PM / AVS: rockchip-io: make the log more consistent
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1460,10 +1460,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
+18 −1
Original line number Diff line number Diff line
@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev);

#define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
#define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
					 WAKE_IRQ_DEDICATED_MANAGED)

/*
 * Per-device wakeup-IRQ bookkeeping used by the PM core.
 *
 * NOTE(review): this span is rendered from a diff view and appears to show
 * both the pre-patch boolean flag (@dedicated_irq) and its post-patch
 * replacement bitmask (@status, holding the WAKE_IRQ_DEDICATED_* bits
 * defined above) at the same time -- confirm the field set against the
 * applied tree before relying on this layout.
 */
struct wake_irq {
	struct device *dev;	/* device owning this wake IRQ */
	unsigned int status;	/* WAKE_IRQ_DEDICATED_* state bits */
	int irq;		/* IRQ number used for wakeup */
	bool dedicated_irq:1;	/* presumably: IRQ serves wakeup only -- verify */
};

extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
					 bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev);

#ifdef CONFIG_PM_SLEEP

@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}

/*
 * No-op stub of dev_pm_enable_wake_irq_check() for kernel configurations
 * where the real implementation is compiled out (the governing #ifdef is
 * outside this hunk -- likely !CONFIG_PM; confirm in the full header).
 * Both arguments are intentionally ignored.
 */
static inline void dev_pm_enable_wake_irq_check(struct device *dev,
						bool can_change_status)
{
}

/*
 * No-op counterpart to the dev_pm_enable_wake_irq_check() stub above,
 * for the same compiled-out configuration (governing #ifdef not visible
 * in this hunk -- confirm in the full header).
 */
static inline void dev_pm_disable_wake_irq_check(struct device *dev)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
+5 −1
Original line number Diff line number Diff line
@@ -856,6 +856,9 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+49 −13
Original line number Diff line number Diff line
@@ -241,7 +241,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
@@ -515,7 +516,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq(dev);
	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;
@@ -554,7 +555,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
	return retval;

 fail:
	dev_pm_disable_wake_irq(dev);
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);
@@ -712,8 +713,8 @@ static int rpm_resume(struct device *dev, int rpmflags)

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
@@ -737,12 +738,12 @@ static int rpm_resume(struct device *dev, int rpmflags)

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq(dev);
	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
@@ -1027,7 +1028,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		/*
		 * It is invalid to suspend a device with an active child,
		 * unless it has been set to ignore its children.
		 */
		if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count)) {
			dev_err(dev, "runtime PM trying to suspend device but active child\n");
			error = -EBUSY;
			goto out;
		}

		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
@@ -1478,6 +1489,16 @@ int pm_runtime_force_suspend(struct device *dev)
	if (ret)
		goto err;

	/*
	 * Increase the runtime PM usage count for the device's parent, in case
	 * when we find the device being used when system suspend was invoked.
	 * This informs pm_runtime_force_resume() to resume the parent
	 * immediately, which is needed to be able to resume its children,
	 * when not deferring the resume to be managed via runtime PM.
	 */
	if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
		pm_runtime_get_noresume(dev->parent);

	pm_runtime_set_suspended(dev);
	return 0;
err:
@@ -1487,16 +1508,20 @@ int pm_runtime_force_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior invoking this function we expect the user to have brought the device
 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
 * those actions and brings the device into full power. We update the runtime PM
 * status and re-enables runtime PM.
 * those actions and brings the device into full power, if it is expected to be
 * used on system resume. To distinguish that, we check whether the runtime PM
 * usage count is greater than 1 (the PM core increases the usage count in the
 * system PM prepare phase), as that indicates a real user (such as a subsystem,
 * driver, userspace, etc.) is using it. If that is the case, the device is
 * expected to be used on system resume as well, so then we resume it. In the
 * other case, we defer the resume to be managed via runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to make
 * sure the device is put into full power state.
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
@@ -1513,6 +1538,17 @@ int pm_runtime_force_resume(struct device *dev)
	if (!pm_runtime_status_suspended(dev))
		goto out;

	/*
	 * Decrease the parent's runtime PM usage count, if we increased it
	 * during system suspend in pm_runtime_force_suspend().
	*/
	if (atomic_read(&dev->power.usage_count) > 1) {
		if (dev->parent)
			pm_runtime_put_noidle(dev->parent);
	} else {
		goto out;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;
+5 −1
Original line number Diff line number Diff line
@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
	s32 value;
	int ret;

	if (kstrtos32(buf, 0, &value)) {
	if (kstrtos32(buf, 0, &value) == 0) {
		/* Users can't write negative values directly */
		if (value < 0)
			return -EINVAL;
	} else {
		if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
Loading