Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb84b6f0 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "lpm-levels: Use residency instead of power and energy overhead values"

parents c7809dfd 7bd40ba9
Loading
Loading
Loading
Loading
+148 −12
Original line number Diff line number Diff line
@@ -38,25 +38,87 @@ static const struct lpm_type_str lpm_types[] = {
	{SUSPEND, "suspend_enabled"},
};

static DEFINE_PER_CPU(uint32_t *, max_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;

static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr)
static void *get_enabled_ptr(struct kobj_attribute *attr,
					struct lpm_level_avail *avail)
{
	void *arg = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		arg = (void *) &avail->idle_enabled;
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		arg = (void *) &avail->suspend_enabled;

	return arg;
}

/*
 * get_avail_ptr() - recover the lpm_level_avail that owns a sysfs attr.
 * @kobj: sysfs kobject the attribute hangs off (unused, kept for the
 *        sysfs show/store call signature)
 * @attr: embedded kobj_attribute; its name identifies which member of
 *        struct lpm_level_avail it is
 *
 * Uses container_of() on the embedded attribute to get back to the
 * enclosing struct lpm_level_avail. Returns NULL if the attribute name
 * is neither the idle nor the suspend enable node.
 */
static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
					struct kobj_attribute *attr)
{
	struct lpm_level_avail *avail = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		avail = container_of(attr, struct lpm_level_avail,
					idle_enabled_attr);
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		avail = container_of(attr, struct lpm_level_avail,
					suspend_enabled_attr);

	return avail;
}
/*
 * set_optimum_cpu_residency() - fill the per-cpu max_residency table.
 * @cpu:        per-cpu low power mode descriptor
 * @cpu_id:     cpu whose per-cpu table is rebuilt
 * @probe_time: true at probe, when every mode is treated as allowed;
 *              false on sysfs updates, when lpm_cpu_mode_allow() gates
 *              each deeper level
 *
 * For each level, residency[level] becomes the smallest non-zero
 * break-even residency among the allowed deeper levels, or ~0 when no
 * deeper level qualifies. The idle governor compares the predicted
 * sleep length against this table.
 */
static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
		bool probe_time)
{
	uint32_t *residency = per_cpu(max_residency, cpu_id);
	int lvl;

	for (lvl = 0; lvl < cpu->nlevels; lvl++) {
		struct power_params *pwr = &cpu->levels[lvl].pwr;
		uint32_t best = ~0;
		int deeper;

		for (deeper = lvl + 1; deeper < cpu->nlevels; deeper++) {
			uint32_t res = pwr->residencies[deeper];
			bool allowed = probe_time ||
				lpm_cpu_mode_allow(cpu_id, deeper, true);

			if (allowed && res != 0 && res < best)
				best = res;
		}

		residency[lvl] = best;
	}
}

/*
 * set_optimum_cluster_residency() - compute max_residency per cluster level.
 * @cluster:    cluster whose level table is rebuilt
 * @probe_time: true at probe (all modes allowed); false on sysfs updates,
 *              when lpm_cluster_mode_allow() gates each deeper level
 *
 * Mirrors set_optimum_cpu_residency(): for each level i, max_residency
 * becomes the smallest non-zero break-even residency among allowed
 * deeper levels j > i, or ~0 if none qualifies.
 *
 * Fix: the previous code assigned mode_avail only under "if (i >= j)",
 * so iterations with j > i read the stale value from the j == i pass,
 * and it queried the availability of level i instead of the candidate
 * level j. Start j at i + 1 (residencies[j] is 0 for j <= i anyway)
 * and check level j, consistent with the CPU variant.
 */
static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
		bool probe_time)
{
	int i, j;
	bool mode_avail;

	for (i = 0; i < cluster->nlevels; i++) {
		struct power_params *pwr = &cluster->levels[i].pwr;

		pwr->max_residency = ~0;
		for (j = i + 1; j < cluster->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cluster_mode_allow(cluster, j, true);

			if (mode_avail &&
				(pwr->max_residency > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				pwr->max_residency = pwr->residencies[j];
		}
	}
}

/*
 * get_per_cpu_max_residency() - return @cpu's residency threshold table.
 *
 * Exposes the per-cpu array built by set_optimum_cpu_residency() so the
 * idle governor (cpu_power_select) can compare the predicted sleep time
 * against each level's break-even residency. The pointer refers to
 * per-cpu storage allocated at probe; callers must not free it.
 */
uint32_t *get_per_cpu_max_residency(int cpu)
{
	return per_cpu(max_residency, cpu);
}

ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -65,7 +127,7 @@ ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
	int ret = 0;
	struct kernel_param kp;

	kp.arg = get_avail_val(kobj, attr);
	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
	ret = param_get_bool(buf, &kp);
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
@@ -80,15 +142,23 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
{
	int ret = 0;
	struct kernel_param kp;
	struct lpm_level_avail *avail;

	kp.arg = get_avail_val(kobj, attr);
	avail = get_avail_ptr(kobj, attr);
	kp.arg = get_enabled_ptr(attr, avail);
	ret = param_set_bool(buf, &kp);

	if (avail->cpu_node)
		set_optimum_cpu_residency(avail->data, avail->idx, false);
	else
		set_optimum_cluster_residency(avail->data, false);

	return ret ? ret : len;
}

static int create_lvl_avail_nodes(const char *name,
			struct kobject *parent, struct lpm_level_avail *avail)
			struct kobject *parent, struct lpm_level_avail *avail,
			void *data, int index, bool cpu_node)
{
	struct attribute_group *attr_group = NULL;
	struct attribute **attr = NULL;
@@ -139,6 +209,9 @@ static int create_lvl_avail_nodes(const char *name,
	avail->idle_enabled = true;
	avail->suspend_enabled = true;
	avail->kobj = kobj;
	avail->data = data;
	avail->idx = index;
	avail->cpu_node = cpu_node;

	return ret;

@@ -181,7 +254,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
		for (i = 0; i < p->cpu->nlevels; i++) {

			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
					cpu_kobj[cpu_idx], &level_list[i]);
					cpu_kobj[cpu_idx], &level_list[i],
					(void *)p->cpu, cpu, true);
			if (ret)
				goto release_kobj;
		}
@@ -215,7 +289,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)

	for (i = 0; i < p->nlevels; i++) {
		ret = create_lvl_avail_nodes(p->levels[i].level_name,
				cluster_kobj, &p->levels[i].available);
				cluster_kobj, &p->levels[i].available,
				(void *)p, 0, false);
		if (ret)
			return ret;
	}
@@ -421,6 +496,9 @@ static int parse_power_params(struct device_node *node,

	key = "qcom,time-overhead";
	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
	if (ret)
		goto fail;

fail:
	if (ret)
		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
@@ -615,11 +693,31 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
	return 0;
}

/*
 * calculate_residency() - break-even sleep time between two LPM levels.
 * @base_pwr: power params of the shallower level
 * @next_pwr: power params of the deeper candidate level
 *
 * Solves for the sleep duration at which the deeper level's extra
 * enter/exit energy overhead is amortized by its lower steady-state
 * power:
 *
 *   residency = ((E_next - E_base) - (P_next*T_next - P_base*T_base))
 *                            / (P_base - P_next)
 *
 * Clamped below by the deeper level's enter+exit time overhead. On
 * inconsistent DT power numbers (negative break-even, or equal
 * steady-state powers, which would previously divide by zero) it warns
 * and falls back to next_pwr->time_overhead_us.
 */
static int calculate_residency(struct power_params *base_pwr,
					struct power_params *next_pwr)
{
	int32_t power_delta = (int32_t)(base_pwr->ss_power -
						next_pwr->ss_power);
	int32_t residency;

	/* Equal steady-state powers would divide by zero below. */
	if (power_delta == 0) {
		__WARN_printf("%s: Incorrect power attributes for LPM\n",
				__func__);
		return next_pwr->time_overhead_us;
	}

	residency = (int32_t)(next_pwr->energy_overhead -
						base_pwr->energy_overhead) -
		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));

	residency /= power_delta;

	if (residency < 0) {
		__WARN_printf("%s: Incorrect power attributes for LPM\n",
				__func__);
		return next_pwr->time_overhead_us;
	}

	return residency < next_pwr->time_overhead_us ?
				next_pwr->time_overhead_us : residency;
}

static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
	struct device_node *n;
	int ret = -ENOMEM;
	int i;
	int i, j;
	char *key;

	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
@@ -676,6 +774,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
		else if (ret)
			goto failed;
	}
	for (i = 0; i < c->cpu->nlevels; i++) {
		for (j = 0; j < c->cpu->nlevels; j++) {
			if (i >= j) {
				c->cpu->levels[i].pwr.residencies[j] = 0;
				continue;
			}

			c->cpu->levels[i].pwr.residencies[j] =
				calculate_residency(&c->cpu->levels[i].pwr,
					&c->cpu->levels[j].pwr);

			pr_err("%s: idx %d %u\n", __func__, j,
					c->cpu->levels[i].pwr.residencies[j]);
		}
	}

	return 0;
failed:
	for (i = 0; i < c->cpu->nlevels; i++) {
@@ -732,6 +846,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
	struct device_node *n;
	char *key;
	int ret = 0;
	int i, j;

	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
@@ -789,6 +904,16 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
				goto failed_parse_cluster;

			c->aff_level = 1;

			for_each_cpu(i, &c->child_cpus) {
				per_cpu(max_residency, i) = devm_kzalloc(
					&lpm_pdev->dev,
					sizeof(uint32_t) * c->cpu->nlevels,
					GFP_KERNEL);
				if (!per_cpu(max_residency, i))
					return ERR_PTR(-ENOMEM);
				set_optimum_cpu_residency(c->cpu, i, true);
			}
		}
	}

@@ -797,6 +922,17 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
	else
		c->last_level = c->nlevels-1;

	for (i = 0; i < c->nlevels; i++) {
		for (j = 0; j < c->nlevels; j++) {
			if (i >= j) {
				c->levels[i].pwr.residencies[j] = 0;
				continue;
			}
			c->levels[i].pwr.residencies[j] = calculate_residency(
				&c->levels[i].pwr, &c->levels[j].pwr);
		}
	}
	set_optimum_cluster_residency(c, true);
	return c;

failed_parse_cluster:
+6 −41
Original line number Diff line number Diff line
@@ -433,18 +433,15 @@ static int cpu_power_select(struct cpuidle_device *dev,
		struct lpm_cpu *cpu)
{
	int best_level = -1;
	uint32_t best_level_pwr = ~0U;
	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
							dev->cpu);
	uint32_t sleep_us =
		(uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	uint32_t pwr;
	int i;
	uint32_t lvl_latency_us = 0;
	uint32_t lvl_overhead_us = 0;
	uint32_t lvl_overhead_energy = 0;
	uint32_t *residency = get_per_cpu_max_residency(dev->cpu);

	if (!cpu)
		return -EINVAL;
@@ -468,12 +465,8 @@ static int cpu_power_select(struct cpuidle_device *dev,

		lvl_latency_us = pwr_params->latency_us;

		lvl_overhead_us = pwr_params->time_overhead_us;

		lvl_overhead_energy = pwr_params->energy_overhead;

		if (latency_us < lvl_latency_us)
			continue;
			break;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
@@ -484,32 +477,15 @@ static int cpu_power_select(struct cpuidle_device *dev,
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		if (next_wakeup_us <= pwr_params->time_overhead_us)
			continue;

		/*
		 * If wakeup time greater than overhead by a factor of 1000
		 * assume that core steady state power dominates the power
		 * equation
		 */
		if ((next_wakeup_us >> 10) > lvl_overhead_us) {
			pwr = pwr_params->ss_power;
		} else {
			pwr = pwr_params->ss_power;
			pwr -= (lvl_overhead_us * pwr_params->ss_power) /
						next_wakeup_us;
			pwr += pwr_params->energy_overhead / next_wakeup_us;
		}

		if (best_level_pwr >= pwr) {
		if (next_wakeup_us <= residency[i]) {
			best_level = i;
			best_level_pwr = pwr;
			if (next_event_us && next_event_us < sleep_us &&
				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
				modified_time_us
					= next_event_us - lvl_latency_us;
			else
				modified_time_us = 0;
			break;
		}
	}

@@ -567,8 +543,6 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
{
	int best_level = -1;
	int i;
	uint32_t best_level_pwr = ~0U;
	uint32_t pwr;
	struct cpumask mask;
	uint32_t latency_us = ~0U;
	uint32_t sleep_us;
@@ -620,18 +594,9 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
		if (level->notify_rpm && msm_rpm_waiting_for_ack())
			continue;

		if ((sleep_us >> 10) > pwr_params->time_overhead_us) {
			pwr = pwr_params->ss_power;
		} else {
			pwr = pwr_params->ss_power;
			pwr -= (pwr_params->time_overhead_us *
					pwr_params->ss_power) / sleep_us;
			pwr += pwr_params->energy_overhead / sleep_us;
		}

		if (best_level_pwr >= pwr) {
		if (sleep_us <= pwr_params->max_residency) {
			best_level = i;
			best_level_pwr = pwr;
			break;
		}
	}

+6 −1
Original line number Diff line number Diff line
@@ -27,6 +27,8 @@ struct power_params {
	uint32_t ss_power;		/* Steady state power */
	uint32_t energy_overhead;	/* Enter + exit over head */
	uint32_t time_overhead_us;	/* Enter + exit overhead */
	uint32_t residencies[NR_LPM_LEVELS];
	uint32_t max_residency;
};

struct lpm_cpu_level {
@@ -55,6 +57,9 @@ struct lpm_level_avail {
	struct kobject *kobj;
	struct kobj_attribute idle_enabled_attr;
	struct kobj_attribute suspend_enabled_attr;
	void *data;
	int idx;
	bool cpu_node;
};

struct lpm_cluster_level {
@@ -119,7 +124,7 @@ bool lpm_cpu_mode_allow(unsigned int cpu,
		unsigned int mode, bool from_idle);
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
		unsigned int mode, bool from_idle);

uint32_t *get_per_cpu_max_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;

#ifdef CONFIG_SMP