Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 71c9596a authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "defconfig: Enable min frequency adjustment for big cluster"

parents 576166bc a8da68cf
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -581,6 +581,7 @@ CONFIG_MSM_BAM_DMUX=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
+1 −0
Original line number Original line Diff line number Diff line
@@ -601,6 +601,7 @@ CONFIG_MSM_BAM_DMUX=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
+8 −0
Original line number Original line Diff line number Diff line
@@ -868,3 +868,11 @@ config MSM_BAM_DMUX
		interrupt event and event data.
		interrupt event and event data.


source "drivers/soc/qcom/wcnss/Kconfig"
source "drivers/soc/qcom/wcnss/Kconfig"

config BIG_CLUSTER_MIN_FREQ_ADJUST
	bool "Adjust BIG cluster min frequency based on power collapse state"
	default n
	help
	  This driver is used to set the floor of the min frequency of big cluster
	  to the user specified value when the cluster is not power collapsed. When
	  the cluster is power collapsed it resets the value to physical limits.
+1 −0
Original line number Original line Diff line number Diff line
@@ -105,3 +105,4 @@ obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
obj-$(CONFIG_WCNSS_CORE) += wcnss/
obj-$(CONFIG_WCNSS_CORE) += wcnss/
obj-$(CONFIG_BIG_CLUSTER_MIN_FREQ_ADJUST) += big_cluster_min_freq_adjust.o
+278 −0
Original line number Original line Diff line number Diff line
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)     "big_min_freq_adjust: " fmt

#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/moduleparam.h>

/* Requested/applied state of the big-cluster min-frequency floor. */
enum min_freq_adjust {
	ADJUST_MIN_FLOOR,	/* Set min floor to user supplied value */
	RESET_MIN_FLOOR,	/* Reset min floor cpuinfo value */
};

/* All driver state lives in one file-static singleton (see below). */
struct big_min_freq_adjust_data {
	struct cpumask cluster_cpumask;		/* big-cluster CPUs (4-7, set at enable) */
	unsigned int min_freq_floor;		/* user floor in kHz; defaults to POLICY_MIN */
	struct delayed_work min_freq_work;	/* deferred cpufreq_update_policy() trigger */
	unsigned long min_down_delay_jiffies;	/* holdoff before dropping the floor */
	enum min_freq_adjust min_freq_state;	/* last state applied via cpufreq notifier */
	enum min_freq_adjust min_freq_request;	/* state requested by CPU PM events */
	spinlock_t lock;			/* protects min_freq_state/min_freq_request */
	bool big_min_freq_on;			/* feature enabled; notifiers no-op when false */
	bool is_init;				/* late_initcall has run (param set may enable) */
};
static struct big_min_freq_adjust_data big_min_freq_adjust_data;

static void cpufreq_min_freq_work(struct work_struct *work)
{
	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;

	spin_lock(&p->lock);
	if (p->min_freq_state == p->min_freq_request) {
		spin_unlock(&p->lock);
		return;
	}
	spin_unlock(&p->lock);
	cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));
}

/*
 * cpufreq policy notifier: on CPUFREQ_ADJUST for the big-cluster policy,
 * clamp the policy's minimum to either the user floor (cluster active)
 * or the hardware minimum (cluster power-collapsed), then record the
 * applied state.
 */
static int cpufreq_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
	struct cpufreq_policy *policy = data;
	unsigned int floor_khz;

	/* Ignore everything until the feature is switched on. */
	if (!p->big_min_freq_on || val != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	/* Only the policy that owns the big-cluster CPUs is of interest. */
	if (!cpumask_test_cpu(cpumask_first(&p->cluster_cpumask),
				policy->related_cpus))
		return NOTIFY_DONE;

	spin_lock(&p->lock);
	floor_khz = (p->min_freq_request == ADJUST_MIN_FLOOR) ?
			p->min_freq_floor : policy->cpuinfo.min_freq;
	cpufreq_verify_within_limits(policy, floor_khz,
			policy->cpuinfo.max_freq);
	/* Remember what we applied so the worker can detect divergence. */
	p->min_freq_state = p->min_freq_request;
	spin_unlock(&p->lock);

	return NOTIFY_OK;
}

/* Registered for CPUFREQ_POLICY_NOTIFIER events in enable path. */
static struct notifier_block cpufreq_nb = {
	.notifier_call = cpufreq_callback
};

#define AFFINITY_LEVEL_L2 1
/*
 * CPU PM notifier: converts big-cluster power-collapse entry/exit events
 * into floor adjust/reset requests and (re)schedules the deferred worker.
 * The lock is held across the request update and the cancel/schedule pair
 * so a racing event cannot interleave a stale work item.
 */
static int cpu_pm_callback(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
	unsigned long aff_level = (unsigned long) v;
	unsigned long delay;
	int cpu;

	/* Feature not enabled yet */
	if (p->big_min_freq_on == false)
		return NOTIFY_DONE;

	/* Only cluster-level (L2) power events matter here */
	if (aff_level != AFFINITY_LEVEL_L2)
		return NOTIFY_DONE;

	cpu = smp_processor_id();

	/* Ignore events raised by CPUs outside the big cluster */
	if (!cpumask_test_cpu(cpu, &p->cluster_cpumask))
		return NOTIFY_DONE;

	spin_lock(&p->lock);
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		/* Cluster collapsing: drop the floor after a holdoff delay */
		p->min_freq_request = RESET_MIN_FLOOR;
		delay = p->min_down_delay_jiffies;
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		/* Cluster active again: restore the user floor promptly */
		p->min_freq_request = ADJUST_MIN_FLOOR;
		/* To avoid unnecessary oscillations between exit and idle */
		delay = 1;
		break;
	default:
		spin_unlock(&p->lock);
		return NOTIFY_DONE;
	}

	/* Supersede any previously queued request with the latest one */
	cancel_delayed_work(&p->min_freq_work);

	/* Only schedule work when applied state differs from the request */
	if (p->min_freq_state != p->min_freq_request)
		schedule_delayed_work(&p->min_freq_work, delay);
	spin_unlock(&p->lock);

	return NOTIFY_OK;
}

/* Registered with cpu_pm_register_notifier() in the enable path. */
static struct notifier_block cpu_pm_nb = {
	.notifier_call = cpu_pm_callback
};

/* Tunable holdoff before dropping the floor; 0 means "use default". */
static unsigned long __read_mostly big_min_down_delay_ms;
#define MIN_DOWN_DELAY_MSEC 80 /* Default big_min_down_delay in msec */
#define POLICY_MIN 1094400 /* Default min_freq_floor in KHz */

/*
 * Runs via smp_call_function_any() on a big-cluster CPU: if the floor
 * is not already requested, request it and tell the caller (through the
 * bool it passed) that a policy update is needed.
 */
static void trigger_state_machine(void *d)
{
	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
	bool *needs_update = d;

	/* Floor already requested: nothing to do. */
	if (p->min_freq_request == ADJUST_MIN_FLOOR)
		return;

	p->min_freq_request = ADJUST_MIN_FLOOR;
	*needs_update = true;
}

/*
 * One-shot enable of the feature: builds the big-cluster mask, applies
 * parameter defaults, registers the CPU PM and cpufreq notifiers, and
 * kicks the state machine once so an already-active cluster gets the
 * floor applied. Returns 0 on success or a negative errno from notifier
 * registration. Once enabled, the feature cannot be disabled.
 */
static int enable_big_min_freq_adjust(void)
{
	struct big_min_freq_adjust_data *p = &big_min_freq_adjust_data;
	int ret;
	int cpu;
	bool update_policy = false;

	/* Idempotent: a second enable request is a no-op. */
	if (p->big_min_freq_on == true)
		return 0;

	INIT_DEFERRABLE_WORK(&p->min_freq_work, cpufreq_min_freq_work);

	/*
	 * Big-cluster CPUs are hard-coded to 4-7 for this target.
	 * NOTE(review): consider deriving this from topology/DT so the
	 * driver is not tied to one core layout.
	 */
	cpumask_clear(&p->cluster_cpumask);
	for (cpu = 4; cpu <= 7; cpu++)
		cpumask_set_cpu(cpu, &p->cluster_cpumask);

	/*
	 * Apply defaults only if the module parameters were never written;
	 * a prior write already updated min_down_delay_jiffies via its
	 * param setter.
	 */
	if (!big_min_down_delay_ms) {
		big_min_down_delay_ms = MIN_DOWN_DELAY_MSEC;
		p->min_down_delay_jiffies = msecs_to_jiffies(
				big_min_down_delay_ms);
	}
	if (!p->min_freq_floor)
		p->min_freq_floor = POLICY_MIN;

	ret = cpu_pm_register_notifier(&cpu_pm_nb);
	if (ret) {
		pr_err("Failed to register for PM notification\n");
		return ret;
	}

	ret = cpufreq_register_notifier(&cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
	if (ret) {
		pr_err("Failed to register for CPUFREQ POLICY notification\n");
		cpu_pm_unregister_notifier(&cpu_pm_nb);
		return ret;
	}

	/*
	 * Initialize state and the lock before publishing big_min_freq_on:
	 * both notifier callbacks bail out while it is false, so nothing
	 * touches the lock before spin_lock_init() has run.
	 */
	p->min_freq_state = RESET_MIN_FLOOR;
	p->min_freq_request = RESET_MIN_FLOOR;
	spin_lock_init(&p->lock);
	p->big_min_freq_on = true;

	/* If BIG cluster is active at this time and continue to be active
	 * forever, in that case min frequency of the cluster will never be
	 * set to floor value.  This is to trigger the state machine and set
	 * the min freq and  min_freq_state to appropriate values.
	 *
	 * Two possibilities here.
	 * 1) If cluster is idle before this, the wakeup is unnecessary but
	 * the state machine is set to proper state.
	 * 2) If cluster is active before this, the wakeup is necessary and
	 * the state machine is set to proper state.
	 */
	smp_call_function_any(&p->cluster_cpumask,
			trigger_state_machine, &update_policy, true);
	if (update_policy)
		cpufreq_update_policy(cpumask_first(&p->cluster_cpumask));

	pr_info("big min freq adjustment enabled\n");

	return 0;
}

static bool __read_mostly big_min_freq_adjust_enabled;

/*
 * Setter for the min_freq_adjust module parameter (enable-only).
 * If the late initcall has already run, enabling the parameter turns
 * the feature on immediately; otherwise the initcall will do it.
 */
static int set_big_min_freq_adjust(const char *buf,
		const struct kernel_param *kp)
{
	int err = param_set_bool_enable_only(buf, kp);

	if (err) {
		pr_err("Unable to set big_min_freq_adjust_enabled: %d\n", err);
		return err;
	}

	/* Too early: big_min_freq_adjust_init() will enable us later. */
	if (!big_min_freq_adjust_data.is_init)
		return 0;

	return enable_big_min_freq_adjust();
}

/* min_freq_adjust: write 1 to enable; reads report current state. */
static const struct kernel_param_ops param_ops_big_min_freq_adjust = {
	.set = set_big_min_freq_adjust,
	.get = param_get_bool,
};
module_param_cb(min_freq_adjust, &param_ops_big_min_freq_adjust,
		&big_min_freq_adjust_enabled, 0644);

/* min_freq_floor: floor value in kHz; 0 falls back to POLICY_MIN. */
module_param_named(min_freq_floor, big_min_freq_adjust_data.min_freq_floor,
		uint, 0644);

/*
 * Setter for the min_down_delay_ms module parameter: stores the value
 * and refreshes the cached jiffies conversion used by the PM notifier.
 */
static int set_min_down_delay_ms(const char *buf, const struct kernel_param *kp)
{
	int err = param_set_ulong(buf, kp);

	if (err) {
		pr_err("Unable to set big_min_down_delay_ms: %d\n", err);
		return err;
	}

	/* Keep the precomputed jiffies value in sync with the new delay. */
	big_min_freq_adjust_data.min_down_delay_jiffies =
			msecs_to_jiffies(big_min_down_delay_ms);

	return 0;
}

/* min_down_delay_ms: holdoff (ms) before dropping the floor on collapse. */
static const struct kernel_param_ops param_ops_big_min_down_delay_ms = {
	.set = set_min_down_delay_ms,
	.get = param_get_ulong,
};
module_param_cb(min_down_delay_ms, &param_ops_big_min_down_delay_ms,
		&big_min_down_delay_ms, 0644);

/*
 * Late initcall: mark init complete (so later parameter writes enable
 * the feature directly) and enable now if the boot parameter asked us to.
 */
static int __init big_min_freq_adjust_init(void)
{
	big_min_freq_adjust_data.is_init = true;

	return big_min_freq_adjust_enabled ? enable_big_min_freq_adjust() : 0;
}
late_initcall(big_min_freq_adjust_init);