Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b9da25ff authored by Mahesh Sivasubramanian's avatar Mahesh Sivasubramanian
Browse files

drivers: cpuidle: lpm-levels: Remove support for non-PSCI



On 4.8 kernel, all MSM targets will support PSCI. Removing support for
non-PSCI targets from sleep driver. Also remove any support for any
legacy RPM communication as that would be replaced by HW aggregators.

Change-Id: Id262e0e5320f4185ecfc02bd93a59baf3f46d0ec
Signed-off-by: default avatarMahesh Sivasubramanian <msivasub@codeaurora.org>
parent 514248db
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -27,4 +27,4 @@ obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE)		+= cpuidle-powernv.o
obj-$(CONFIG_MSM_PM) += lpm-levels.o  lpm-levels-of.o lpm-workarounds.o
obj-$(CONFIG_MSM_PM) += lpm-levels.o lpm-levels-of.o
+6 −165
Original line number Diff line number Diff line
@@ -382,86 +382,6 @@ bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
				avail->suspend_enabled);
}

/*
 * parse_legacy_cluster_params() - parse pre-PSCI ("legacy") SPM cluster
 * properties from device tree node @node into cluster @c.
 *
 * Reads "qcom,spm-device-names" to size the per-device name/ops tables,
 * resolves each SPM device by "<cluster>-<name>" and binds the matching
 * set_mode callback from match_tbl.  A missing property is not an error:
 * the cluster is marked SAW-less and 0 is returned.  Returns 0 on success
 * or a negative errno on failure.
 */
static int parse_legacy_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
	int i;
	char *key;
	int ret;
	struct lpm_match {
		char *devname;
		int (*set_mode)(struct low_power_ops *, int, bool);
	};
	struct lpm_match match_tbl[] = {
		{"l2", set_l2_mode},
		{"cci", set_system_mode},
		{"l3", set_l3_mode},
		{"cbf", set_system_mode},
	};

	key = "qcom,spm-device-names";
	c->ndevices = of_property_count_strings(node, key);

	/* Absent/empty property: benign, treat the cluster as SAW-less. */
	if (c->ndevices < 0) {
		pr_info("%s(): Ignoring cluster params\n", __func__);
		c->no_saw_devices = true;
		c->ndevices = 0;
		return 0;
	}

	c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
				GFP_KERNEL);
	c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
				c->ndevices * sizeof(*c->lpm_dev),
				GFP_KERNEL);
	if (!c->name || !c->lpm_dev) {
		ret = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < c->ndevices; i++) {
		char device_name[20];
		int j;

		ret = of_property_read_string_index(node, key, i, &c->name[i]);
		if (ret)
			goto failed;
		snprintf(device_name, sizeof(device_name), "%s-%s",
				c->cluster_name, c->name[i]);

		c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);

		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
			pr_err("Failed to get spm device by name:%s\n",
					device_name);
			/*
			 * PTR_ERR(NULL) is 0 and would be mistaken for
			 * success by the caller; map NULL to -ENODEV.
			 */
			ret = c->lpm_dev[i].spm ?
				PTR_ERR(c->lpm_dev[i].spm) : -ENODEV;
			goto failed;
		}
		for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
			if (!strcmp(c->name[i], match_tbl[j].devname))
				c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
		}

		if (!c->lpm_dev[i].set_mode) {
			ret = -ENODEV;
			goto failed;
		}
	}

	key = "qcom,default-level";
	if (of_property_read_u32(node, key, &c->default_level))
		c->default_level = 0;
	return 0;
failed:
	pr_err("%s(): Failed reading %s\n", __func__, key);
	/*
	 * These buffers are devres-managed; plain kfree() here would
	 * corrupt the devres list and double-free on device teardown.
	 * Release them through devm_kfree() instead.
	 */
	devm_kfree(&lpm_pdev->dev, c->name);
	devm_kfree(&lpm_pdev->dev, c->lpm_dev);
	c->name = NULL;
	c->lpm_dev = NULL;
	return ret;
}

static int parse_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
@@ -497,28 +417,9 @@ static int parse_cluster_params(struct device_node *node,
		/* Set ndevice to 1 as default */
		c->ndevices = 1;

		return 0;
	} else
		return parse_legacy_cluster_params(node, c);
}

/*
 * parse_lpm_mode() - map an SPM mode name string from the device tree to
 * its MSM_SPM_MODE_* constant; returns -EINVAL for an unrecognized name.
 */
static int parse_lpm_mode(const char *str)
{
	int i;
	struct lpm_lookup_table mode_lookup[] = {
		{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
		{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
		{MSM_SPM_MODE_FASTPC, "fpc"},
		{MSM_SPM_MODE_GDHS, "gdhs"},
		{MSM_SPM_MODE_RETENTION, "retention"},
		{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
		{MSM_SPM_MODE_DISABLED, "active"}
	};

	for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
		if (!strcmp(str, mode_lookup[i].mode_name))
			return  mode_lookup[i].modes;
	return -EINVAL;
	/*
	 * NOTE(review): the two lines below are unreachable (they follow an
	 * unconditional return) and appear to be a diff-rendering artifact
	 * of this commit view — the pr_warn/return 0 belong to the
	 * replacement code introduced by the commit, not to this body.
	 */
		pr_warn("Target supports PSCI only\n");
	return 0;
}

static int parse_power_params(struct device_node *node,
@@ -557,7 +458,6 @@ static int parse_power_params(struct device_node *node,
static int parse_cluster_level(struct device_node *node,
		struct lpm_cluster *cluster)
{
	int i = 0;
	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
	int ret = -ENOMEM;
	char *key;
@@ -575,37 +475,8 @@ static int parse_cluster_level(struct device_node *node,
			goto failed;

		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
	} else if (!cluster->no_saw_devices) {
		key  = "no saw-devices";

		level->mode = devm_kzalloc(&lpm_pdev->dev,
				cluster->ndevices * sizeof(*level->mode),
				GFP_KERNEL);
		if (!level->mode) {
			pr_err("Memory allocation failed\n");
			goto failed;
		}

		for (i = 0; i < cluster->ndevices; i++) {
			const char *spm_mode;
			char key[25] = {0};

			snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
			ret = of_property_read_string(node, key, &spm_mode);
			if (ret)
				goto failed;

			level->mode[i] = parse_lpm_mode(spm_mode);

			if (level->mode[i] < 0)
				goto failed;

			if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
				|| level->mode[i] ==
				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
				level->is_reset |= true;
		}
	}
	} else
		pr_warn("Build supports PSCI targets only");

	key = "label";
	ret = of_property_read_string(node, key, &level->level_name);
@@ -650,32 +521,6 @@ static int parse_cluster_level(struct device_node *node,
	return ret;
}

/*
 * parse_cpu_spm_mode() - translate a CPU sleep-mode name from the device
 * tree into its MSM_PM_SLEEP_MODE_* constant.
 *
 * Returns the matching mode value, or -EINVAL when @mode_name is unknown.
 */
static int parse_cpu_spm_mode(const char *mode_name)
{
	struct lpm_lookup_table sleep_modes[] = {
		{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, "wfi"},
		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
			"standalone_pc"},
		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE, "pc"},
		{MSM_PM_SLEEP_MODE_RETENTION, "retention"},
		{MSM_PM_SLEEP_MODE_FASTPC, "fpc"},
	};
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(sleep_modes); idx++)
		if (strcmp(mode_name, sleep_modes[idx].mode_name) == 0)
			return sleep_modes[idx].modes;

	return -EINVAL;
}

static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
	char *key;
@@ -700,12 +545,8 @@ static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
		key = "qcom,hyp-psci";

		l->hyp_psci = of_property_read_bool(n, key);
	} else {
		l->mode = parse_cpu_spm_mode(l->name);

		if (l->mode < 0)
			return l->mode;
	}
	} else
		pr_warn("Build supports PSCI targets only");
	return 0;

}
+8 −107
Original line number Diff line number Diff line
@@ -21,7 +21,6 @@
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/irqchip/msm-mpm-irq.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/tick.h>
@@ -38,10 +37,10 @@
#include <linux/cpu_pm.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm.h>
#include <soc/qcom/rpm-notifier.h>
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/jtag.h>
#include <soc/qcom/system_pm.h>
#include <asm/cputype.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
@@ -51,7 +50,6 @@
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>
#include "../../drivers/clk/msm/clock.h"

#define SCLK_HZ (32768)
#define SCM_HANDOFF_LOCK_ID "S:7"
@@ -114,8 +112,6 @@ static struct hrtimer histtimer;
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
static int lpm_cpu_callback(struct notifier_block *cpu_nb,
				unsigned long action, void *hcpu);

static void cluster_unprepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
@@ -124,10 +120,6 @@ static void cluster_prepare(struct lpm_cluster *cluster,
		const struct cpumask *cpu, int child_idx, bool from_idle,
		int64_t time);

static struct notifier_block __refdata lpm_cpu_nblk = {
	.notifier_call = lpm_cpu_callback,
};

static bool menu_select;
module_param_named(menu_select, menu_select, bool, 0664);

@@ -424,70 +416,6 @@ static void msm_pm_set_timer(uint32_t modified_time_us)
	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}

/*
 * set_l2_mode() - program the L2 SPM low power mode and keep the per-cpu
 * cluster's TZ flag in sync with it.
 *
 * If the previous mode had the L2 off or in GDHS, the coresight CTI
 * context is restored before the new mode is applied; entering an
 * L2-off/GDHS mode saves it first.  An unrecognized mode falls back to
 * MSM_SPM_MODE_DISABLED with the L2 flagged on.  Returns 0 on success or
 * the SPM driver's error code.
 */
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
{
	int lpm = mode;
	int rc = 0;
	/* The TZ flag lives on the current CPU's cluster ops, not on @ops. */
	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
			smp_processor_id())->lpm_dev;

	/* Previous state had the L2 off or gated: bring CTI context back. */
	if (cpu_ops->tz_flag & MSM_SCM_L2_OFF ||
			cpu_ops->tz_flag & MSM_SCM_L2_GDHS)
		coresight_cti_ctx_restore();

	switch (mode) {
	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
	case MSM_SPM_MODE_POWER_COLLAPSE:
	case MSM_SPM_MODE_FASTPC:
		cpu_ops->tz_flag = MSM_SCM_L2_OFF;
		coresight_cti_ctx_save();
		break;
	case MSM_SPM_MODE_GDHS:
		cpu_ops->tz_flag = MSM_SCM_L2_GDHS;
		coresight_cti_ctx_save();
		break;
	case MSM_SPM_MODE_CLOCK_GATING:
	case MSM_SPM_MODE_RETENTION:
	case MSM_SPM_MODE_DISABLED:
		cpu_ops->tz_flag = MSM_SCM_L2_ON;
		break;
	default:
		/* Unknown request: keep L2 on and disable the LPM. */
		cpu_ops->tz_flag = MSM_SCM_L2_ON;
		lpm = MSM_SPM_MODE_DISABLED;
		break;
	}
	rc = msm_spm_config_low_power_mode(ops->spm, lpm, notify_rpm);

	if (rc)
		pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
				__func__, lpm, rc);

	return rc;
}

/*
 * set_l3_mode() - program the L3 SPM low power mode.
 *
 * For the modes that power down the L3 (standalone PC, PC, FastPC) the
 * current CPU cluster's TZ flag is tagged with MSM_SCM_L3_PC_OFF before
 * the SPM is configured.  Returns the SPM configuration result.
 */
int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
{
	/* The TZ flag is tracked on the current CPU's cluster ops. */
	struct low_power_ops *cpu_ops = per_cpu(cpu_cluster,
			smp_processor_id())->lpm_dev;

	if (mode == MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE ||
			mode == MSM_SPM_MODE_POWER_COLLAPSE ||
			mode == MSM_SPM_MODE_FASTPC)
		cpu_ops->tz_flag |= MSM_SCM_L3_PC_OFF;

	return msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
}


/*
 * set_system_mode() - pass a system-level low power mode straight through
 * to the SPM driver; no TZ flag bookkeeping is needed at this level.
 */
int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm)
{
	int rc;

	rc = msm_spm_config_low_power_mode(ops->spm, mode, notify_rpm);
	return rc;
}

static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
		struct lpm_cluster_level *level)
{
@@ -957,7 +885,6 @@ static void clear_cl_history_each(struct cluster_history *history)
	history->hinvalid = 0;
	history->htmr_wkup = 0;
}

static void clear_cl_predict_history(void)
{
	struct lpm_cluster *cluster = lpm_root_node;
@@ -1045,9 +972,6 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
		if (suspend_in_progress && from_idle && level->notify_rpm)
			continue;

		if (level->notify_rpm && msm_rpm_waiting_for_ack())
			continue;

		best_level = i;

		if (predicted ? (pred_us <= pwr_params->max_residency)
@@ -1105,7 +1029,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
		if (ret)
			goto failed_set_mode;
	}

	if (level->notify_rpm) {
		struct cpumask nextcpu, *cpumask;
		uint64_t us;
@@ -1115,7 +1038,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
						from_idle, &pred_us);
		cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;

		ret = msm_rpm_enter_sleep(0, cpumask);
		if (ret) {
			pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret);
			goto failed_set_mode;
@@ -1126,9 +1048,8 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
		clear_cl_predict_history();

		do_div(us, USEC_PER_SEC/SCLK_HZ);
		msm_mpm_enter_sleep(us, from_idle, cpumask);
		system_sleep_enter(us);
	}

	/* Notify cluster enter event after successfully config completion */
	cluster_notify(cluster, level, true);

@@ -1145,16 +1066,16 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
	}

	return 0;

failed_set_mode:

	for (i = 0; i < cluster->ndevices; i++) {
		int rc = 0;

		level = &cluster->levels[cluster->default_level];
		rc = set_device_mode(cluster, i, level);
		// rc = set_device_mode(cluster, i, level);
		WARN_ON(rc);
	}

	return ret;
}

@@ -1271,16 +1192,9 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);

	level = &cluster->levels[cluster->last_level];
	if (level->notify_rpm) {
		msm_rpm_exit_sleep();

		/* If RPM bumps up CX to turbo, unvote CX turbo vote
		 * during exit of rpm assisted power collapse to
		 * reduce the power impact
		 */

		msm_mpm_exit_sleep(from_idle);
	}
	if (level->notify_rpm)
		system_sleep_exit();

	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
			cluster->num_children_in_sync.bits[0],
@@ -1329,8 +1243,7 @@ static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
	 * next wakeup within a cluster, in which case, CPU switches over to
	 * use broadcast timer.
	 */
	if (from_idle && (cpu_level->use_bc_timer ||
			(cpu_index >= cluster->min_child_level)))
	if (from_idle && cpu_level->use_bc_timer)
		tick_broadcast_enter();

	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
@@ -1353,8 +1266,7 @@ static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
	bool jtag_save_restore =
			cluster->cpu->levels[cpu_index].jtag_save_restore;

	if (from_idle && (cpu_level->use_bc_timer ||
			(cpu_index >= cluster->min_child_level)))
	if (from_idle && cpu_level->use_bc_timer)
		tick_broadcast_exit();

	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
@@ -1399,7 +1311,6 @@ int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
}

#if !defined(CONFIG_CPU_V7)
asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
{
	int affinity_level = 0;
@@ -1417,13 +1328,6 @@ bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
		return 1;
	}

	if (cluster->cpu->levels[idx].hyp_psci) {
		stop_critical_timings();
		__invoke_psci_fn_smc(0xC4000021, 0, 0, 0);
		start_critical_timings();
		return 1;
	}

	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
	state_id |= (power_state | affinity_level
			| cluster->cpu->levels[idx].psci_id);
@@ -1751,7 +1655,6 @@ static void register_cluster_lpm_stats(struct lpm_cluster *cl,
static int lpm_suspend_prepare(void)
{
	suspend_in_progress = true;
	msm_mpm_suspend_prepare();
	lpm_stats_suspend_enter();

	return 0;
@@ -1760,7 +1663,6 @@ static int lpm_suspend_prepare(void)
/*
 * lpm_suspend_wake() - resume-side counterpart of lpm_suspend_prepare().
 * Clears the in-suspend flag, then unwinds the MPM and LPM-stats suspend
 * bookkeeping.
 */
static void lpm_suspend_wake(void)
{
	suspend_in_progress = false;
	msm_mpm_suspend_wake();
	lpm_stats_suspend_exit();
}

@@ -1793,7 +1695,6 @@ static int lpm_suspend_enter(suspend_state_t state)
	 * clocks that are enabled and preventing the system level
	 * LPMs(XO and Vmin).
	 */
	clock_debug_print_enabled();

	WARN_ON(!use_psci);
	psci_enter_sleep(cluster, idx, true);
+1 −1
Original line number Diff line number Diff line
@@ -146,7 +146,7 @@ uint32_t *get_per_cpu_max_residency(int cpu);
uint32_t *get_per_cpu_min_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;

#ifdef CONFIG_SMP
#if CONFIG_SMP
extern DEFINE_PER_CPU(bool, pending_ipi);
static inline bool is_IPI_pending(const struct cpumask *mask)
{