Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 602ce579 authored by Raghavendra Kakarla's avatar Raghavendra Kakarla Committed by Gerrit - the friendly Code Review server
Browse files

lpm-levels: Add RIMPS timer base rail/PLL turn off functionality



This patch adds the logic to enable the RIMPS timer-based
rail-pc trigger functionality. It programs the RIMPS timer
when each core of the corresponding clock domain enters
the rail-pc LPM, and enables the RIMPS timer feature so
that RIMPS can turn off the rail/PLL based on the timer
threshold and timeout values.

Change-Id: Ib3589933d339a7224147a7c7afc006f260d8205b
Signed-off-by: default avatarRaghavendra Kakarla <rkakarla@codeaurora.org>
parent e5d85696
Loading
Loading
Loading
Loading
+35 −1
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/device.h>
@@ -542,6 +543,7 @@ static int parse_cpu_levels(struct device_node *dn, struct lpm_cluster *c)
	if (!cpu)
		return -ENOMEM;

	spin_lock_init(&cpu->cpu_lock);
	if (get_cpumask_for_node(dn, &cpu->related_cpus))
		return -EINVAL;

@@ -689,6 +691,37 @@ static struct lpm_cluster *parse_cluster(struct device_node *node,
	return NULL;
}

/*
 * add_rimps_tmr_register() - map and pre-program the per-CPU RIMPS timers.
 * @dn: device node whose "reg" entries hold one RIMPS timer region per
 *      lpm_cpu (indexed in tree-walk order).
 * @cl: cluster (sub)tree to walk.
 *
 * Walks the cluster hierarchy depth-first. For every lpm_cpu found, maps
 * the next RIMPS timer register region and writes the deepest LPM level's
 * min_residency — converted to 19.2 MHz timer ticks — into TIMER_THRESHOLD.
 *
 * NOTE(review): @i is static so the of_iomap() index keeps advancing
 * across the recursive calls, but it is never reset — a second invocation
 * of the parse path (e.g. after probe deferral) would start from a stale
 * index. Confirm the caller only ever runs once.
 */
static void add_rimps_tmr_register(struct device_node *dn,
		struct lpm_cluster *cl)
{
	struct lpm_cpu *lpm_cpu;
	uint32_t rimps_threshold = 0;
	static uint32_t i;	/* running "reg" index, shared across recursion */

	if (list_empty(&cl->cpu)) {
		struct lpm_cluster *n;

		/* No CPUs at this level: descend into the child clusters. */
		list_for_each_entry(n, &cl->child, list)
			add_rimps_tmr_register(dn, n);
	}

	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
		int idx = lpm_cpu->nlevels-1;	/* deepest low-power mode */

		lpm_cpu->rimps_tmr_base = of_iomap(dn, i++);
		if (!lpm_cpu->rimps_tmr_base) {
			/* Best-effort: leave the remaining CPUs unmapped. */
			pr_debug("Unable to get rimps base address\n");
			return;
		}
		/* Threshold = deepest level's min residency, in ticks. */
		rimps_threshold = lpm_cpu->levels[idx].pwr.min_residency;
		rimps_threshold = us_to_ticks(rimps_threshold);
		writel_relaxed(rimps_threshold, lpm_cpu->rimps_tmr_base
							 + TIMER_THRESHOLD);
		/* Ensure the write is complete before returning. */
		wmb();
	}
}

struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
	struct device_node *top = NULL;
@@ -702,6 +735,7 @@ struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)

	lpm_pdev = pdev;
	c = parse_cluster(top, NULL);
	add_rimps_tmr_register(pdev->dev.of_node, c);
	of_node_put(top);
	return c;
}
+76 −0
Original line number Diff line number Diff line
@@ -223,15 +223,88 @@ static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
	spin_unlock(&debug_lock);
}

/*
 * us_to_ticks() - convert a sleep duration in microseconds into
 * 19.2 MHz (ARCH_TIMER_HZ) timer ticks.
 *
 * The value is split into whole seconds and a sub-second remainder so
 * that the 64-bit intermediate products cannot overflow for
 * multi-second inputs. The result is truncated to 32 bits by the
 * return type.
 */
uint32_t us_to_ticks(uint64_t sleep_val)
{
	uint64_t sec = sleep_val;
	uint64_t rem;

	do_div(sec, USEC_PER_SEC);

	if (!sec) {
		/* Sub-second value: scale directly, no overflow risk. */
		sleep_val = sleep_val * ARCH_TIMER_HZ;
		do_div(sleep_val, USEC_PER_SEC);
		return sleep_val;
	}

	/* Whole seconds convert exactly; handle the remainder in ns. */
	rem = sleep_val - sec * USEC_PER_SEC;
	if (rem > 0) {
		rem = rem * NSEC_PER_USEC;
		do_div(rem, NSEC_PER_SEC / ARCH_TIMER_HZ);
	}

	return sec * ARCH_TIMER_HZ + rem;
}

/*
 * get_next_event() - earliest pending hrtimer expiry across all CPUs
 * related to @cpu, expressed as microseconds from now.
 *
 * NOTE(review): if the earliest expiry is already in the past, the
 * negative delta wraps in the uint32_t return — confirm callers
 * tolerate that.
 */
static uint32_t get_next_event(struct lpm_cpu *cpu)
{
	unsigned int rcpu;
	ktime_t earliest = KTIME_MAX;

	/* Take the minimum next_hrtimer over every related CPU. */
	for_each_cpu(rcpu, &cpu->related_cpus) {
		ktime_t t = per_cpu(cpu_lpm, rcpu)->next_hrtimer;

		if (t < earliest)
			earliest = t;
	}

	return ktime_to_us(ktime_sub(earliest, ktime_get()));
}

/*
 * program_rimps_timer() - arm the RIMPS wakeup timer for @cpu's rail.
 *
 * Called when the last CPU of a clock domain enters its deepest LPM
 * (and from the hotplug-dying path). If every CPU sharing the rail is
 * in sync, the earliest pending wakeup (converted to 19.2 MHz ticks)
 * is loaded into TIMER_VAL and the timer is re-enabled, allowing RIMPS
 * to turn off the rail/PLL until that deadline.
 *
 * NOTE(review): only a zero return from get_next_event() is filtered;
 * a wakeup already in the past yields a huge unsigned value here —
 * confirm that is acceptable.
 */
static void program_rimps_timer(struct lpm_cpu *cpu)
{
	uint32_t ctrl_val, next_event;
	struct cpumask cpu_lpm_mask;
	struct lpm_cluster *cl = cpu->parent;

	/* No-op when the RIMPS timer region was never mapped. */
	if (!cpu->rimps_tmr_base)
		return;

	/* Program only once all CPUs of this clock domain are in sync. */
	cpumask_and(&cpu_lpm_mask, &cl->num_children_in_sync,
						&cpu->related_cpus);
	if (!cpumask_equal(&cpu_lpm_mask, &cpu->related_cpus))
		return;

	next_event = get_next_event(cpu);
	if (!next_event)
		return;

	next_event = us_to_ticks(next_event);
	spin_lock(&cpu->cpu_lock);

	/* RIMPS timer pending should be read before programming timeout val */
	readl_relaxed(cpu->rimps_tmr_base + TIMER_PENDING);
	ctrl_val = readl_relaxed(cpu->rimps_tmr_base + TIMER_CTRL);
	/* Sequence matters: disable, load the new timeout, re-enable. */
	writel_relaxed(ctrl_val & ~(TIMER_CONTROL_EN),
				cpu->rimps_tmr_base + TIMER_CTRL);
	writel_relaxed(next_event, cpu->rimps_tmr_base + TIMER_VAL);
	writel_relaxed(ctrl_val | (TIMER_CONTROL_EN),
				cpu->rimps_tmr_base + TIMER_CTRL);
	/* Ensure the write is complete before returning. */
	wmb();
	spin_unlock(&cpu->cpu_lock);
}

#ifdef CONFIG_SMP
static int lpm_dying_cpu(unsigned int cpu)
{
	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);

	update_debug_pc_event(CPU_HP_DYING, cpu,
				cluster->num_children_in_sync.bits[0],
				cluster->child_cpus.bits[0], false);
	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
	program_rimps_timer(lpm_cpu);
	return 0;
}

@@ -1369,6 +1442,9 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
	if (need_resched())
		goto exit;

	if (idx == cpu->nlevels - 1)
		program_rimps_timer(cpu);

	ret = psci_enter_sleep(cpu, idx, true);
	success = (ret == 0);

+15 −0
Original line number Diff line number Diff line
@@ -24,6 +24,18 @@
#define PREMATURE_CNT_LOW 1
#define PREMATURE_CNT_HIGH 5

/* RIMPS registers */
#define TIMER_CTRL		0x0
#define TIMER_VAL		0x4
#define TIMER_PENDING		0x18
#define TIMER_THRESHOLD		0x1C

/* RIMPS registers offset */
#define TIMER_CONTROL_EN	0x1

/* RIMPS timer clock */
#define ARCH_TIMER_HZ	19200000

struct power_params {
	uint32_t entry_latency;		/* Entry latency */
	uint32_t exit_latency;		/* Exit latency */
@@ -50,6 +62,8 @@ struct lpm_cpu {
	uint32_t ref_premature_cnt;
	uint32_t tmr_add;
	bool lpm_prediction;
	void __iomem *rimps_tmr_base;
	spinlock_t cpu_lock;
	bool ipi_prediction;
	uint64_t bias;
	struct cpuidle_driver *drv;
@@ -122,6 +136,7 @@ struct lpm_cluster {
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
void free_cluster_node(struct lpm_cluster *cluster);
void cluster_dt_walkthrough(struct lpm_cluster *cluster);
uint32_t us_to_ticks(uint64_t sleep_val);

int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
int lpm_cpu_mode_allow(unsigned int cpu,