Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9f92f861 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "drivers: thermal: cpu-cooling: Fix core isolation support"

parents 17719555 46fc12e8
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -135,6 +135,16 @@ config THERMAL_GOV_USER_SPACE
	help
	  Enable this to let the user space manage the platform thermals.

config THERMAL_GOV_LOW_LIMITS
	bool "Low limits mitigation governor"
	help
	  Enable this to manage platform limits using the low limits
	  governor.

	  Enable this governor to monitor and trigger floor mitigation.
	  This governor will monitor the limits going below a
	  trip threshold to trigger a floor mitigation.

config THERMAL_GOV_POWER_ALLOCATOR
	bool "Power allocator thermal governor"
	help
+2 −1
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG)	+= gov_bang_bang.o
thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE)	+= user_space.o
thermal_sys-$(CONFIG_THERMAL_GOV_LOW_LIMITS) += gov_low_limits.o
thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR)	+= power_allocator.o

# cpufreq cooling
@@ -54,7 +55,7 @@ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL)	+= intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL)	+= st/
obj-$(CONFIG_QCOM_TSENS)	+= qcom/
obj-$(CONFIG_ARCH_QCOM)		+= qcom/
obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra/
obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
+359 −34
Original line number Diff line number Diff line
@@ -31,6 +31,9 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/sched.h>
#include <linux/of_device.h>
#include <linux/suspend.h>

#include <trace/events/thermal.h>

@@ -46,6 +49,7 @@
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 *	level n --> core isolated
 */

/**
@@ -75,11 +79,16 @@ struct time_in_idle {
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @cpu_id: The CPU for which the cooling device will do the mitigation.
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling	devices.
 * @clipped_freq: integer value representing the absolute value of the clipped
 *	frequency.
 * @cpufreq_floor_state: integer value representing the frequency floor state
 *	of cpufreq cooling devices.
 * @floor_freq: integer value representing the absolute value of the floor
 *	frequency.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @freq_table: Freq table in descending order of frequencies
@@ -96,8 +105,11 @@ struct time_in_idle {
struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	int cpu_id;
	unsigned int cpufreq_state;
	unsigned int clipped_freq;
	unsigned int cpufreq_floor_state;
	unsigned int floor_freq;
	unsigned int max_level;
	struct freq_table *freq_table;	/* In descending order */
	struct thermal_cooling_device *cdev;
@@ -105,8 +117,16 @@ struct cpufreq_cooling_device {
	struct list_head node;
	struct time_in_idle *idle_time;
	get_static_t plat_get_static_power;
	struct cpu_cooling_ops *plat_ops;
};

static atomic_t in_suspend;
static int8_t cpuhp_registered;
static struct work_struct cpuhp_register_work;
static struct cpumask cpus_pending_online;
static struct cpumask cpus_isolated_by_thermal;
static DEFINE_MUTEX(core_isolate_lock);

static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);
@@ -133,6 +153,117 @@ static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
	return level - 1;
}

/*
 * cpufreq_cooling_pm_notify - PM notifier for suspend/resume transitions.
 * @nb: notifier block (unused beyond registration).
 * @mode: PM event being signalled.
 * @_unused: unused notifier payload.
 *
 * On suspend entry, sets @in_suspend so cpufreq_hp_online() stops
 * vetoing CPUs coming back up. On resume, walks every cooling device
 * bound to a specific CPU and re-applies core isolation for any device
 * whose mitigation state is still at max_level.
 *
 * Return: always 0 (NOTIFY_DONE).
 */
static int cpufreq_cooling_pm_notify(struct notifier_block *nb,
				unsigned long mode, void *_unused)
{
	struct cpufreq_cooling_device *cpufreq_cdev;
	unsigned int cpu;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_RESTORE_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* Entering suspend: flag it so cpufreq_hp_online() backs off. */
		atomic_set(&in_suspend, 1);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		mutex_lock(&cooling_list_lock);
		list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
			/* Skip devices not bound to a single CPU. */
			if (cpufreq_cdev->cpu_id == -1)
				continue;
			mutex_lock(&core_isolate_lock);
			if (cpufreq_cdev->cpufreq_state ==
				cpufreq_cdev->max_level) {
				cpu = cpufreq_cdev->cpu_id;
				/*
				 * Unlock this lock before calling
				 * schedule_isolate. as this could lead to
				 * deadlock with hotplug path.
				 */
				mutex_unlock(&core_isolate_lock);
				/*
				 * Re-isolate only if the CPU is online and we
				 * were not already the ones isolating it; roll
				 * the bookkeeping bit back if isolation fails.
				 */
				if (cpu_online(cpu) &&
					!cpumask_test_and_set_cpu(cpu,
					&cpus_isolated_by_thermal)) {
					if (sched_isolate_cpu(cpu))
						cpumask_clear_cpu(cpu,
						&cpus_isolated_by_thermal);
				}
				continue;
			}
			mutex_unlock(&core_isolate_lock);
		}
		mutex_unlock(&cooling_list_lock);

		atomic_set(&in_suspend, 0);
		break;
	default:
		break;
	}
	return 0;
}

/* PM notifier: re-applies thermal core isolation across suspend/resume. */
static struct notifier_block cpufreq_cooling_pm_nb = {
	.notifier_call = cpufreq_cooling_pm_notify,
};

/*
 * cpufreq_hp_offline - CPU hotplug teardown callback.
 * @offline_cpu: CPU going down.
 *
 * If the CPU going offline is one this driver isolated (its cooling
 * device is pinned at max_level), drop the isolation so stale
 * bookkeeping is not kept for an offline CPU.
 *
 * Return: always 0 (the offline transition is never vetoed).
 */
static int cpufreq_hp_offline(unsigned int offline_cpu)
{
	struct cpufreq_cooling_device *cpufreq_cdev;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
		/* Only consider the cooling device covering this CPU. */
		if (!cpumask_test_cpu(offline_cpu,
					cpufreq_cdev->policy->related_cpus))
			continue;
		if (cpufreq_cdev->cpu_id != -1 &&
				offline_cpu != cpufreq_cdev->cpu_id)
			continue;

		mutex_lock(&core_isolate_lock);
		/*
		 * Undo our isolation before the CPU disappears. The
		 * _unlocked variant is presumably used because the hotplug
		 * path already holds the relevant lock — TODO confirm
		 * against sched_unisolate_cpu_unlocked()'s contract.
		 */
		if ((cpufreq_cdev->cpufreq_state == cpufreq_cdev->max_level) &&
			(cpumask_test_and_clear_cpu(offline_cpu,
			&cpus_isolated_by_thermal)))
			sched_unisolate_cpu_unlocked(offline_cpu);
		mutex_unlock(&core_isolate_lock);
		break;
	}
	mutex_unlock(&cooling_list_lock);

	return 0;
}

/*
 * cpufreq_hp_online - CPU hotplug bring-up callback.
 * @online_cpu: CPU coming up.
 *
 * While a cooling device covering @online_cpu is at max_level (core
 * isolation requested), veto the online transition and remember the
 * CPU in @cpus_pending_online so cpufreq_set_cur_state() can bring it
 * back online once mitigation relaxes. The veto is skipped during
 * suspend/resume (@in_suspend set by the PM notifier).
 */
static int cpufreq_hp_online(unsigned int online_cpu)
{
	struct cpufreq_cooling_device *cpufreq_cdev;
	int ret = 0;

	if (atomic_read(&in_suspend))
		return 0;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
		/* Only consider the cooling device covering this CPU. */
		if (!cpumask_test_cpu(online_cpu,
					cpufreq_cdev->policy->related_cpus))
			continue;
		if (cpufreq_cdev->cpu_id != -1 &&
				online_cpu != cpufreq_cdev->cpu_id)
			continue;

		mutex_lock(&core_isolate_lock);
		if (cpufreq_cdev->cpufreq_state == cpufreq_cdev->max_level) {
			cpumask_set_cpu(online_cpu, &cpus_pending_online);
			/*
			 * NOTE(review): cpuhp startup callbacks conventionally
			 * veto with a negative errno; NOTIFY_BAD relies only
			 * on being non-zero here — confirm intended.
			 */
			ret = NOTIFY_BAD;
		}
		mutex_unlock(&core_isolate_lock);
		break;
	}
	mutex_unlock(&cooling_list_lock);

	return ret;
}


/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb:	struct notifier_block * with callback info.
@@ -149,7 +280,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long clipped_freq;
	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (event != CPUFREQ_ADJUST)
@@ -161,9 +292,14 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
		 * A new copy of the policy is sent to the notifier and can't
		 * compare that directly.
		 */
		if (policy->cpu != cpufreq_cdev->policy->cpu)
		if (!cpumask_intersects(cpufreq_cdev->policy->related_cpus,
					policy->related_cpus))
			continue;

		if (cpufreq_cdev->clipped_freq < clipped_freq)
			clipped_freq = cpufreq_cdev->clipped_freq;
		if (cpufreq_cdev->floor_freq > floor_freq)
			floor_freq = cpufreq_cdev->floor_freq;
	}
	/*
	 * policy->max is the maximum allowed frequency defined by user
	 * and clipped_freq is the maximum that thermal constraints
@@ -174,13 +310,12 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
	 *
	 * But, if clipped_freq is greater than policy->max, we don't
	 * need to do anything.
	 *
	 * Similarly, if policy minimum set by the user is less than
	 * the floor_frequency, then adjust the policy->min.
	 */
		clipped_freq = cpufreq_cdev->clipped_freq;

		if (policy->max > clipped_freq)
			cpufreq_verify_within_limits(policy, 0, clipped_freq);
		break;
	}
	if (policy->max > clipped_freq || policy->min < floor_freq)
		cpufreq_verify_within_limits(policy, floor_freq, clipped_freq);
	mutex_unlock(&cooling_list_lock);

	return NOTIFY_OK;
@@ -410,6 +545,76 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
	return 0;
}

/**
 * cpufreq_get_min_state - callback function to get the device floor state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the cooling device floor.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * floor state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_min_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Lockless single-word read of the value set_min_state() stores. */
	*state = cpufreq_cdev->cpufreq_floor_state;

	return 0;
}

/**
 * cpufreq_set_min_state - callback function to set the device floor state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * floor state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_min_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	unsigned int cpu = cpufreq_cdev->policy->cpu;
	unsigned int floor_freq;

	/* Clamp out-of-range requests instead of rejecting them. */
	if (state > cpufreq_cdev->max_level)
		state = cpufreq_cdev->max_level;

	/* Nothing to do if the floor is unchanged. */
	if (cpufreq_cdev->cpufreq_floor_state == state)
		return 0;

	cpufreq_cdev->cpufreq_floor_state = state;

	/*
	 * Check if the device has a platform mitigation function that
	 * can handle the CPU freq mitigation, if not, notify cpufreq
	 * framework.
	 */
	if (cpufreq_cdev->plat_ops &&
		cpufreq_cdev->plat_ops->floor_limit) {
		/*
		 * Last level is core isolation so use the frequency
		 * of previous state.
		 */
		if (state == cpufreq_cdev->max_level)
			state--;
		floor_freq = cpufreq_cdev->freq_table[state].frequency;
		cpufreq_cdev->floor_freq = floor_freq;
		cpufreq_cdev->plat_ops->floor_limit(cpu, floor_freq);
	} else {
		/*
		 * NOTE(review): unlike the plat_ops branch, @state is not
		 * stepped back from max_level here, so freq_table[max_level]
		 * (the isolation level, whose entry is never written by the
		 * registration fill loop) may be read — confirm intended.
		 */
		floor_freq = cpufreq_cdev->freq_table[state].frequency;
		cpufreq_cdev->floor_freq = floor_freq;
		cpufreq_update_policy(cpu);
	}

	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
@@ -445,6 +650,10 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	unsigned int clip_freq;
	unsigned long prev_state;
	struct device *cpu_dev;
	int ret = 0;
	int cpu = 0;

	/* Request state should be less than max_level */
	if (WARN_ON(state > cpufreq_cdev->max_level))
@@ -454,11 +663,51 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	mutex_lock(&core_isolate_lock);
	cpu = (cpufreq_cdev->cpu_id == -1) ?
		cpufreq_cdev->policy->cpu : cpufreq_cdev->cpu_id;
	prev_state = cpufreq_cdev->cpufreq_state;
	cpufreq_cdev->cpufreq_state = state;
	mutex_unlock(&core_isolate_lock);
	/* If state is the last, isolate the CPU */
	if (state == cpufreq_cdev->max_level) {
		if (cpu_online(cpu) &&
			(!cpumask_test_and_set_cpu(cpu,
			&cpus_isolated_by_thermal))) {
			if (sched_isolate_cpu(cpu))
				cpumask_clear_cpu(cpu,
					&cpus_isolated_by_thermal);
		}
		return ret;
	} else if ((prev_state == cpufreq_cdev->max_level)
			&& (state < cpufreq_cdev->max_level)) {
		if (cpumask_test_and_clear_cpu(cpu, &cpus_pending_online)) {
			cpu_dev = get_cpu_device(cpu);
			ret = device_online(cpu_dev);
			if (ret)
				pr_err("CPU:%d online error:%d\n", cpu, ret);
			goto update_frequency;
		} else if (cpumask_test_and_clear_cpu(cpu,
			&cpus_isolated_by_thermal)) {
			sched_unisolate_cpu(cpu);
		}
	}
update_frequency:
	clip_freq = cpufreq_cdev->freq_table[state].frequency;
	cpufreq_cdev->cpufreq_state = state;
	cpufreq_cdev->clipped_freq = clip_freq;

	cpufreq_update_policy(cpufreq_cdev->policy->cpu);
	/* Check if the device has a platform mitigation function that
	 * can handle the CPU freq mitigation, if not, notify cpufreq
	 * framework.
	 */
	if (cpufreq_cdev->plat_ops) {
		if (cpufreq_cdev->plat_ops->ceil_limit)
			cpufreq_cdev->plat_ops->ceil_limit(cpu,
						clip_freq);
	} else {
		cpufreq_update_policy(cpu);
	}

	return 0;
}
@@ -635,6 +884,8 @@ static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
	.set_min_state = cpufreq_set_min_state,
	.get_min_state = cpufreq_get_min_state,
};

static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
@@ -665,6 +916,16 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
	return max;
}

static void register_cdev(struct work_struct *work)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"cpu_cooling/no-sched",	cpufreq_hp_online,
				cpufreq_hp_offline);
	if (ret < 0)
		pr_err("Error registering for hotpug callback:%d\n", ret);
}
/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
@@ -673,6 +934,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
 * @capacitance: dynamic power coefficient for these cpus
 * @plat_static_func: function to calculate the static power consumed by these
 *                    cpus (optional)
 * @plat_mitig_func: function that does the mitigation by changing the
 *                   frequencies (Optional). By default, cpufreq framework will
 *                   be notified of the new limits.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -685,12 +949,13 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			struct cpufreq_policy *policy, u32 capacitance,
			get_static_t plat_static_func)
			get_static_t plat_static_func,
			struct cpu_cooling_ops *plat_ops)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int freq, i, num_cpus;
	unsigned int freq, i, num_cpus, cpu_idx;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;
	bool first;
@@ -720,17 +985,26 @@ __cpufreq_cooling_register(struct device_node *np,
		cdev = ERR_PTR(-ENOMEM);
		goto free_cdev;
	}
	cpufreq_cdev->cpu_id = -1;
	for_each_cpu(cpu_idx, policy->related_cpus) {
		if (np == of_cpu_device_node_get(cpu_idx)) {
			cpufreq_cdev->cpu_id = cpu_idx;
			break;
		}
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;
	/* Last level will indicate the core will be isolated. */
	cpufreq_cdev->max_level = i + 1;

	cpufreq_cdev->freq_table = kmalloc_array(i,
	cpufreq_cdev->freq_table = kmalloc_array(cpufreq_cdev->max_level,
					sizeof(*cpufreq_cdev->freq_table),
					GFP_KERNEL);
	if (!cpufreq_cdev->freq_table) {
		cdev = ERR_PTR(-ENOMEM);
		goto free_idle_time;
	}
	/* max_level is an index */
	cpufreq_cdev->max_level--;

	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
@@ -743,7 +1017,7 @@ __cpufreq_cooling_register(struct device_node *np,
		 cpufreq_cdev->id);

	/* Fill freq-table in descending order of frequencies */
	for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
	for (i = 0, freq = -1; i < cpufreq_cdev->max_level; i++) {
		freq = find_next_max(policy->freq_table, freq);
		cpufreq_cdev->freq_table[i].frequency = freq;

@@ -768,12 +1042,17 @@ __cpufreq_cooling_register(struct device_node *np,
		cooling_ops = &cpufreq_cooling_ops;
	}

	cpufreq_cdev->plat_ops = plat_ops;

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_ida;

	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
	cpufreq_cdev->floor_freq =
		cpufreq_cdev->freq_table[cpufreq_cdev->max_level].frequency;
	cpufreq_cdev->cpufreq_floor_state = cpufreq_cdev->max_level;
	cpufreq_cdev->cdev = cdev;

	mutex_lock(&cooling_list_lock);
@@ -782,9 +1061,17 @@ __cpufreq_cooling_register(struct device_node *np,
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (first)
	if (first && !cpufreq_cdev->plat_ops)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);
	if (!cpuhp_registered) {
		cpuhp_registered = 1;
		register_pm_notifier(&cpufreq_cooling_pm_nb);
		cpumask_clear(&cpus_pending_online);
		cpumask_clear(&cpus_isolated_by_thermal);
		INIT_WORK(&cpuhp_register_work, register_cdev);
		queue_work(system_wq, &cpuhp_register_work);
	}

	return cdev;

@@ -813,7 +1100,7 @@ __cpufreq_cooling_register(struct device_node *np,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, 0, NULL);
	return __cpufreq_cooling_register(NULL, policy, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

@@ -837,7 +1124,7 @@ of_cpufreq_cooling_register(struct device_node *np,
	if (!np)
		return ERR_PTR(-EINVAL);

	return __cpufreq_cooling_register(np, policy, 0, NULL);
	return __cpufreq_cooling_register(np, policy, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

@@ -867,10 +1154,44 @@ cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
			       get_static_t plat_static_func)
{
	return __cpufreq_cooling_register(NULL, policy, capacitance,
				plat_static_func);
				plat_static_func, NULL);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);

/**
 * cpufreq_platform_cooling_register() - create cpufreq cooling device with
 * additional platform specific mitigation function.
 *
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen
 * @plat_ops: the platform mitigation functions that will be called instead of
 * cpufreq, if provided.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
				struct cpu_cooling_ops *plat_ops)
{
	struct device_node *cpu_node;
	struct cpufreq_policy *policy;

	/* Device-tree node of the first clipped CPU names the cooling device. */
	cpu_node = of_cpu_device_node_get(cpumask_first(clip_cpus));
	if (!cpu_node) {
		pr_err("No cpu node\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * NOTE(review): cpufreq_cpu_get() takes a policy reference and
	 * of_cpu_device_node_get() a node reference; neither is dropped on
	 * this path (including the error return below) — confirm the
	 * references are intentionally held for the cdev's lifetime.
	 */
	policy = cpufreq_cpu_get(cpumask_first(clip_cpus));
	if (!policy) {
		pr_err("no policy for cpu%d\n", cpumask_first(clip_cpus));
		return ERR_PTR(-EINVAL);
	}

	return __cpufreq_cooling_register(cpu_node, policy, 0, NULL,
			plat_ops);
}
EXPORT_SYMBOL(cpufreq_platform_cooling_register);

/**
 * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
 * @np:	a valid struct device_node to the cooling device device tree node
@@ -904,7 +1225,7 @@ of_cpufreq_power_cooling_register(struct device_node *np,
		return ERR_PTR(-EINVAL);

	return __cpufreq_cooling_register(np, policy, capacitance,
				plat_static_func);
				plat_static_func, NULL);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);

@@ -930,9 +1251,13 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
	last = list_empty(&cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (last)
		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
	if (last) {
		unregister_pm_notifier(&cpufreq_cooling_pm_nb);
		if (!cpufreq_cdev->plat_ops)
			cpufreq_unregister_notifier(
					&thermal_cpufreq_notifier_block,
					CPUFREQ_POLICY_NOTIFIER);
	}

	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
+131 −0
Original line number Diff line number Diff line
/*
 *  Copyright (C) 2012 Intel Corp
 *  Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
 *  Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/thermal.h>
#include <trace/events/thermal.h>

#include "thermal_core.h"

/*
 * thermal_zone_trip_update - recompute cooling targets for one low-limit trip.
 * @tz: thermal zone being evaluated.
 * @trip: index of the trip point.
 *
 * Inverse of the usual high-temperature logic: mitigation engages when
 * the zone temperature falls to or below the trip temperature, and is
 * only released once it climbs above trip + hysteresis. Throttled
 * instances are driven to their upper state; released ones back to
 * THERMAL_NO_TARGET.
 */
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
	int trip_temp, trip_hyst;
	enum thermal_trip_type trip_type;
	struct thermal_instance *instance;
	bool throttle;
	int old_target;

	/* NOTE(review): get_trip_temp/get_trip_type returns are unchecked;
	 * trip_temp/trip_type would be uninitialized on failure. */
	tz->ops->get_trip_temp(tz, trip, &trip_temp);
	tz->ops->get_trip_type(tz, trip, &trip_type);
	if (tz->ops->get_trip_hyst) {
		tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
		/* Release threshold sits above the trip by the hysteresis. */
		trip_hyst = trip_temp + trip_hyst;
	} else {
		trip_hyst = trip_temp;
	}

	mutex_lock(&tz->lock);

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (instance->trip != trip)
			continue;

		/*
		 * Engage at/below the trip; once engaged, stay engaged
		 * until the temperature passes the hysteresis threshold.
		 */
		if ((tz->temperature <= trip_temp) ||
			(instance->target != THERMAL_NO_TARGET
				&& tz->temperature < trip_hyst))
			throttle = true;
		else
			throttle = false;

		dev_dbg(&tz->device,
			"Trip%d[type=%d,temp=%d,hyst=%d],throttle=%d\n",
			trip, trip_type, trip_temp, trip_hyst, throttle);

		old_target = instance->target;
		instance->target = (throttle) ? instance->upper
					: THERMAL_NO_TARGET;
		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
					old_target, (int)instance->target);

		if (old_target == instance->target)
			continue;

		/* Maintain tz->passive and emit trace on engage/release edges. */
		if (old_target == THERMAL_NO_TARGET &&
				instance->target != THERMAL_NO_TARGET) {
			trace_thermal_zone_trip(tz, trip, trip_type, true);
			tz->passive += 1;
		} else if (old_target != THERMAL_NO_TARGET &&
				instance->target == THERMAL_NO_TARGET) {
			trace_thermal_zone_trip(tz, trip, trip_type, false);
			tz->passive -= 1;
		}

		instance->cdev->updated = false; /* cdev needs update */
	}

	mutex_unlock(&tz->lock);
}

/**
 * low_limits_throttle - apply low-limit mitigation for a fired trip point
 * @tz: thermal zone being evaluated
 * @trip: index of the trip point
 *
 * Recomputes the target state of every cooling instance bound to @trip
 * (mitigation engages when the sensor reading drops below the trip and
 * releases once it rises past the trip hysteresis), then pushes the
 * pending targets out to each instance's cooling device.
 *
 * Return: always 0.
 */
static int low_limits_throttle(struct thermal_zone_device *tz, int trip)
{
	struct thermal_instance *pos;

	/* Phase 1: decide per-instance targets for this trip. */
	thermal_zone_trip_update(tz, trip);

	/* Phase 2: flush the new targets to the cooling devices. */
	mutex_lock(&tz->lock);
	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
		thermal_cdev_update(pos->cdev);
	mutex_unlock(&tz->lock);

	return 0;
}

/* Governor variant with min_state_throttle set ("floor" mitigation). */
static struct thermal_governor thermal_gov_low_limits_floor = {
	.name		= "low_limits_floor",
	.throttle	= low_limits_throttle,
	.min_state_throttle = 1,
};

/* Governor variant without min_state_throttle ("cap" mitigation). */
static struct thermal_governor thermal_gov_low_limits_cap = {
	.name		= "low_limits_cap",
	.throttle	= low_limits_throttle,
};

/**
 * thermal_gov_low_limits_register - register both low-limits governors.
 *
 * Registers the "low_limits_cap" and "low_limits_floor" governors with
 * the thermal core. Unlike the original, the first registration's
 * result is no longer silently discarded, and a failure of the second
 * rolls the first back so registration is all-or-nothing.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int thermal_gov_low_limits_register(void)
{
	int ret;

	ret = thermal_register_governor(&thermal_gov_low_limits_cap);
	if (ret)
		return ret;

	ret = thermal_register_governor(&thermal_gov_low_limits_floor);
	if (ret)
		thermal_unregister_governor(&thermal_gov_low_limits_cap);

	return ret;
}

/* Unregister both low-limits governors from the thermal core. */
void thermal_gov_low_limits_unregister(void)
{
	thermal_unregister_governor(&thermal_gov_low_limits_cap);
	thermal_unregister_governor(&thermal_gov_low_limits_floor);
}
+424 −41

File changed.

Preview size limit exceeded, changes collapsed.

Loading