Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b306976e authored by Rafael J. Wysocki
Browse files

Merge branches 'pm-clk', 'pm-domains' and 'powercap'

* pm-clk:
  PM / clk: Print acquired clock name in addition to con_id
  PM / clk: Fix clock error check in __pm_clk_add()
  drivers: sh: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  arm: davinci: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  arm: omap1: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  arm: keystone: remove boilerplate code and use USE_PM_CLK_RUNTIME_OPS
  PM / clock_ops: Provide default runtime ops to users

* pm-domains:
  PM / Domains: Skip timings during syscore suspend/resume

* powercap:
  powercap / RAPL: Support Knights Landing
  powercap / RAPL: Floor frequency setting in Atom SoC
Loading
Loading
Loading
Loading
+1 −31
Original line number Diff line number Diff line
@@ -14,39 +14,9 @@
#include <linux/pm_clock.h>
#include <linux/platform_device.h>

#ifdef CONFIG_PM
/* Runtime-suspend handler: run the generic suspend, then gate the clocks. */
static int davinci_pm_runtime_suspend(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	error = pm_clk_suspend(dev);
	if (!error)
		return 0;

	/* Clock gating failed - undo the generic suspend before bailing out. */
	pm_generic_runtime_resume(dev);
	return error;
}

/* Runtime-resume handler: ungate the clocks, then run the generic resume. */
static int davinci_pm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	/* Ungate the device clocks first so the generic path sees a live device. */
	pm_clk_resume(dev);

	return pm_generic_runtime_resume(dev);
}
#endif

/*
 * PM domain attached to davinci platform devices so runtime PM manages
 * their clocks.
 *
 * NOTE(review): this span is a diff view — it shows both the removed
 * SET_RUNTIME_PM_OPS(...) initializer and the added USE_PM_CLK_RUNTIME_OPS
 * line; per the commit message, the final source keeps only the latter.
 */
static struct dev_pm_domain davinci_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(davinci_pm_runtime_suspend,
				   davinci_pm_runtime_resume, NULL)
		USE_PM_CLK_RUNTIME_OPS
		USE_PLATFORM_PM_SLEEP_OPS
	},
};
+1 −32
Original line number Diff line number Diff line
@@ -19,40 +19,9 @@
#include <linux/clk-provider.h>
#include <linux/of.h>

#ifdef CONFIG_PM
/* Runtime-suspend handler: run the generic suspend, then gate the clocks. */
static int keystone_pm_runtime_suspend(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	error = pm_clk_suspend(dev);
	if (!error)
		return 0;

	/* Clock gating failed - undo the generic suspend before bailing out. */
	pm_generic_runtime_resume(dev);
	return error;
}

/* Runtime-resume handler: ungate the clocks, then run the generic resume. */
static int keystone_pm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	/* Ungate the device clocks first so the generic path sees a live device. */
	pm_clk_resume(dev);

	return pm_generic_runtime_resume(dev);
}
#endif

/*
 * PM domain attached to keystone platform devices so runtime PM manages
 * their clocks.
 *
 * NOTE(review): this span is a diff view — it shows both the removed
 * SET_RUNTIME_PM_OPS(...) initializer and the added USE_PM_CLK_RUNTIME_OPS
 * line; per the commit message, the final source keeps only the latter.
 */
static struct dev_pm_domain keystone_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(keystone_pm_runtime_suspend,
				   keystone_pm_runtime_resume, NULL)
		USE_PM_CLK_RUNTIME_OPS
		USE_PLATFORM_PM_SLEEP_OPS
	},
};
+2 −35
Original line number Diff line number Diff line
@@ -21,48 +21,15 @@

#include "soc.h"

#ifdef CONFIG_PM
/* Runtime-suspend handler: run the generic suspend, then gate the clocks. */
static int omap1_pm_runtime_suspend(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_generic_runtime_suspend(dev);
	if (error)
		return error;

	error = pm_clk_suspend(dev);
	if (!error)
		return 0;

	/* Clock gating failed - undo the generic suspend before bailing out. */
	pm_generic_runtime_resume(dev);
	return error;
}

/* Runtime-resume handler: ungate the clocks, then run the generic resume. */
static int omap1_pm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	/* Ungate the device clocks first so the generic path sees a live device. */
	pm_clk_resume(dev);

	return pm_generic_runtime_resume(dev);
}

/*
 * Default PM domain applied to omap1 platform devices so runtime PM
 * manages their clocks.
 *
 * NOTE(review): this span is a diff view — the removed explicit
 * .runtime_suspend/.runtime_resume initializers and the added
 * USE_PM_CLK_RUNTIME_OPS line both appear; per the commit message, the
 * final source keeps only the macro-provided clock runtime ops.
 */
static struct dev_pm_domain default_pm_domain = {
	.ops = {
		.runtime_suspend = omap1_pm_runtime_suspend,
		.runtime_resume = omap1_pm_runtime_resume,
		USE_PM_CLK_RUNTIME_OPS
		USE_PLATFORM_PM_SLEEP_OPS
	},
};
#define OMAP1_PM_DOMAIN (&default_pm_domain)
#else
#define OMAP1_PM_DOMAIN NULL
#endif /* CONFIG_PM */

/*
 * Bus notifier that attaches the PM domain above to platform devices and
 * registers their "ick"/"fck" clocks for runtime PM management.
 *
 * NOTE(review): this span is a diff view — both the removed
 * .pm_domain = OMAP1_PM_DOMAIN initializer and the added
 * .pm_domain = &default_pm_domain one are shown; real source has exactly
 * one .pm_domain initializer.
 */
static struct pm_clk_notifier_block platform_bus_notifier = {
	.pm_domain = OMAP1_PM_DOMAIN,
	.pm_domain = &default_pm_domain,
	.con_ids = { "ick", "fck", NULL, },
};

+41 −2
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_PM

@@ -67,7 +68,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
	} else {
		clk_prepare(ce->clk);
		ce->status = PCE_STATUS_ACQUIRED;
		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
		dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
			ce->clk, ce->con_id);
	}
}

@@ -93,7 +95,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
			return -ENOMEM;
		}
	} else {
		if (IS_ERR(ce->clk) || !__clk_get(clk)) {
		if (IS_ERR(clk) || !__clk_get(clk)) {
			kfree(ce);
			return -ENOENT;
		}
@@ -367,6 +369,43 @@ static int pm_clk_notify(struct notifier_block *nb,
	return 0;
}

/**
 * pm_clk_runtime_suspend - Default runtime-suspend routine for PM-clock users.
 * @dev: Device to suspend.
 *
 * Run the generic runtime suspend for @dev and, if it succeeds, gate the
 * device's clocks.  If gating the clocks fails, the generic suspend is
 * reverted so the device is left in its previous working state.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_generic_runtime_suspend(dev);
	if (error) {
		dev_err(dev, "failed to suspend device\n");
		return error;
	}

	error = pm_clk_suspend(dev);
	if (!error)
		return 0;

	/* Clock gating failed - put the device back into the working state. */
	dev_err(dev, "failed to suspend clock\n");
	pm_generic_runtime_resume(dev);
	return error;
}

/**
 * pm_clk_runtime_resume - Default runtime-resume routine for PM-clock users.
 * @dev: Device to resume.
 *
 * Ungate the device's clocks and, if that succeeds, run the generic runtime
 * resume for @dev.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int error;

	dev_dbg(dev, "%s\n", __func__);

	error = pm_clk_resume(dev);
	if (error) {
		dev_err(dev, "failed to resume clock\n");
		return error;
	}

	return pm_generic_runtime_resume(dev);
}

#else /* !CONFIG_PM */

/**
+26 −16
Original line number Diff line number Diff line
@@ -181,7 +181,7 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
	genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

static int genpd_power_on(struct generic_pm_domain *genpd)
static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
@@ -190,6 +190,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
@@ -208,7 +211,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd)
	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd)
static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	ktime_t time_start;
	s64 elapsed_ns;
@@ -217,6 +220,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd)
	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
@@ -305,7 +311,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
		}
	}

	ret = genpd_power_on(genpd);
	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

@@ -615,7 +621,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd);
		ret = genpd_power_off(genpd, true);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
@@ -827,6 +833,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
@@ -836,7 +843,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

@@ -847,26 +855,28 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	genpd_power_off(genpd);
	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

@@ -874,11 +884,11 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd);
	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}
@@ -1056,7 +1066,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}
@@ -1086,7 +1096,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
@@ -1300,7 +1310,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			genpd_power_off(genpd);
			genpd_power_off(genpd, true);

			return 0;
		}
@@ -1309,7 +1319,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);
	pm_genpd_sync_poweron(genpd, true);

	return genpd_start_dev(genpd, dev);
}
@@ -1367,9 +1377,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd);
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}
Loading