Loading drivers/cpufreq/Kconfig +11 −0 Original line number Diff line number Diff line Loading @@ -192,6 +192,17 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. config CPU_BOOST tristate "Event based short term CPU freq boost" depends on CPU_FREQ help This driver boosts the frequency of one or more CPUs based on various events that might occur in the system. As of now, the events it reacts to are: - Input events (e.g. touchscreen or keypad activity). If in doubt, say N. config CPU_FREQ_GOV_SCHEDUTIL bool "'schedutil' cpufreq policy governor" depends on CPU_FREQ && SMP Loading drivers/cpufreq/Makefile +1 −0 Original line number Diff line number Diff line Loading @@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPU_BOOST) += cpu-boost.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o Loading drivers/cpufreq/cpu-boost.c 0 → 100644 +274 −0 Original line number Diff line number Diff line // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013-2015,2019, The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "cpu-boost: " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/time.h> #include <linux/sysfs.h> #define cpu_boost_attr_rw(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, show_##_name, store_##_name) #define show_one(file_name) \ static ssize_t show_##file_name \ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ { \ return scnprintf(buf, PAGE_SIZE, "%u\n", file_name); \ } #define store_one(file_name) \ static ssize_t store_##file_name \ (struct kobject *kobj, struct kobj_attribute *attr, \ const char *buf, size_t count) \ { \ \ sscanf(buf, "%u", &file_name); \ return count; \ } struct cpu_sync { int cpu; unsigned int input_boost_min; }; static DEFINE_PER_CPU(struct cpu_sync, sync_info); static struct workqueue_struct *cpu_boost_wq; static struct work_struct input_boost_work; static unsigned int input_boost_freq; show_one(input_boost_freq); store_one(input_boost_freq); cpu_boost_attr_rw(input_boost_freq); static unsigned int input_boost_ms = 40; show_one(input_boost_ms); store_one(input_boost_ms); cpu_boost_attr_rw(input_boost_ms); static struct delayed_work input_boost_rem; static u64 last_input_time; #define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC) /* * The CPUFREQ_ADJUST notifier is used to override the current policy min to * make sure policy min >= boost_min. The cpufreq framework then does the job * of enforcing the new policy. 
*/ static int boost_adjust_notify(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_policy *policy = data; unsigned int cpu = policy->cpu; struct cpu_sync *s = &per_cpu(sync_info, cpu); unsigned int ib_min = s->input_boost_min; switch (val) { case CPUFREQ_ADJUST: if (!ib_min) break; pr_debug("CPU%u policy min before boost: %u kHz\n", cpu, policy->min); pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min); cpufreq_verify_within_limits(policy, ib_min, UINT_MAX); pr_debug("CPU%u policy min after boost: %u kHz\n", cpu, policy->min); break; } return NOTIFY_OK; } static struct notifier_block boost_adjust_nb = { .notifier_call = boost_adjust_notify, }; static void update_policy_online(void) { unsigned int i; /* Re-evaluate policy to trigger adjust notifier for online CPUs */ get_online_cpus(); for_each_online_cpu(i) { pr_debug("Updating policy for CPU%d\n", i); cpufreq_update_policy(i); } put_online_cpus(); } static void do_input_boost_rem(struct work_struct *work) { unsigned int i; struct cpu_sync *i_sync_info; /* Reset the input_boost_min for all CPUs in the system */ pr_debug("Resetting input boost min for all CPUs\n"); for_each_possible_cpu(i) { i_sync_info = &per_cpu(sync_info, i); i_sync_info->input_boost_min = 0; } /* Update policies for all online CPUs */ update_policy_online(); } static void do_input_boost(struct work_struct *work) { unsigned int i; struct cpu_sync *i_sync_info; cancel_delayed_work_sync(&input_boost_rem); /* Set the input_boost_min for all CPUs in the system */ pr_debug("Setting input boost min for all CPUs\n"); for_each_possible_cpu(i) { i_sync_info = &per_cpu(sync_info, i); i_sync_info->input_boost_min = input_boost_freq; } /* Update policies for all online CPUs */ update_policy_online(); queue_delayed_work(cpu_boost_wq, &input_boost_rem, msecs_to_jiffies(input_boost_ms)); } static void cpuboost_input_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { u64 now; if (!input_boost_freq) 
return; now = ktime_to_us(ktime_get()); if (now - last_input_time < MIN_INPUT_INTERVAL) return; if (work_pending(&input_boost_work)) return; queue_work(cpu_boost_wq, &input_boost_work); last_input_time = ktime_to_us(ktime_get()); } static int cpuboost_input_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct input_handle *handle; int error; handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); if (!handle) return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "cpufreq"; error = input_register_handle(handle); if (error) goto err2; error = input_open_device(handle); if (error) goto err1; return 0; err1: input_unregister_handle(handle); err2: kfree(handle); return error; } static void cpuboost_input_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); kfree(handle); } static const struct input_device_id cpuboost_ids[] = { /* multi-touch touchscreen */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_ABS) }, .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = BIT_MASK(ABS_MT_POSITION_X) | BIT_MASK(ABS_MT_POSITION_Y) }, }, /* touchpad */ { .flags = INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, .absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* Keypad */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT, .evbit = { BIT_MASK(EV_KEY) }, }, { }, }; static struct input_handler cpuboost_input_handler = { .event = cpuboost_input_event, .connect = cpuboost_input_connect, .disconnect = cpuboost_input_disconnect, .name = "cpu-boost", .id_table = cpuboost_ids, }; struct kobject *cpu_boost_kobj; static int cpu_boost_init(void) { int cpu, ret; struct cpu_sync *s; cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); if (!cpu_boost_wq) return -EFAULT; INIT_WORK(&input_boost_work, do_input_boost); 
INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem); for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; } cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); cpu_boost_kobj = kobject_create_and_add("cpu_boost", &cpu_subsys.dev_root->kobj); if (!cpu_boost_kobj) pr_err("Failed to initialize sysfs node for cpu_boost.\n"); ret = sysfs_create_file(cpu_boost_kobj, &input_boost_ms_attr.attr); if (ret) pr_err("Failed to create input_boost_ms node: %d\n", ret); ret = sysfs_create_file(cpu_boost_kobj, &input_boost_freq_attr.attr); if (ret) pr_err("Failed to create input_boost_freq node: %d\n", ret); ret = input_register_handler(&cpuboost_input_handler); return 0; } late_initcall(cpu_boost_init); Loading
drivers/cpufreq/Kconfig +11 −0 Original line number Diff line number Diff line Loading @@ -192,6 +192,17 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. config CPU_BOOST tristate "Event based short term CPU freq boost" depends on CPU_FREQ help This driver boosts the frequency of one or more CPUs based on various events that might occur in the system. As of now, the events it reacts to are: - Input events (e.g. touchscreen or keypad activity). If in doubt, say N. config CPU_FREQ_GOV_SCHEDUTIL bool "'schedutil' cpufreq policy governor" depends on CPU_FREQ && SMP Loading
drivers/cpufreq/Makefile +1 −0 Original line number Diff line number Diff line Loading @@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPU_BOOST) += cpu-boost.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o Loading
drivers/cpufreq/cpu-boost.c 0 → 100644 +274 −0 Original line number Diff line number Diff line // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013-2015,2019, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "cpu-boost: " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/time.h> #include <linux/sysfs.h> #define cpu_boost_attr_rw(_name) \ static struct kobj_attribute _name##_attr = \ __ATTR(_name, 0644, show_##_name, store_##_name) #define show_one(file_name) \ static ssize_t show_##file_name \ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ { \ return scnprintf(buf, PAGE_SIZE, "%u\n", file_name); \ } #define store_one(file_name) \ static ssize_t store_##file_name \ (struct kobject *kobj, struct kobj_attribute *attr, \ const char *buf, size_t count) \ { \ \ sscanf(buf, "%u", &file_name); \ return count; \ } struct cpu_sync { int cpu; unsigned int input_boost_min; }; static DEFINE_PER_CPU(struct cpu_sync, sync_info); static struct workqueue_struct *cpu_boost_wq; static struct work_struct input_boost_work; static unsigned int input_boost_freq; show_one(input_boost_freq); store_one(input_boost_freq); cpu_boost_attr_rw(input_boost_freq); static unsigned int input_boost_ms = 40; show_one(input_boost_ms); store_one(input_boost_ms); cpu_boost_attr_rw(input_boost_ms); static struct delayed_work input_boost_rem; static u64 last_input_time; #define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC) /* * The CPUFREQ_ADJUST notifier is used to override the current policy min to * make sure policy min >= boost_min. The cpufreq framework then does the job * of enforcing the new policy. 
*/ static int boost_adjust_notify(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_policy *policy = data; unsigned int cpu = policy->cpu; struct cpu_sync *s = &per_cpu(sync_info, cpu); unsigned int ib_min = s->input_boost_min; switch (val) { case CPUFREQ_ADJUST: if (!ib_min) break; pr_debug("CPU%u policy min before boost: %u kHz\n", cpu, policy->min); pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min); cpufreq_verify_within_limits(policy, ib_min, UINT_MAX); pr_debug("CPU%u policy min after boost: %u kHz\n", cpu, policy->min); break; } return NOTIFY_OK; } static struct notifier_block boost_adjust_nb = { .notifier_call = boost_adjust_notify, }; static void update_policy_online(void) { unsigned int i; /* Re-evaluate policy to trigger adjust notifier for online CPUs */ get_online_cpus(); for_each_online_cpu(i) { pr_debug("Updating policy for CPU%d\n", i); cpufreq_update_policy(i); } put_online_cpus(); } static void do_input_boost_rem(struct work_struct *work) { unsigned int i; struct cpu_sync *i_sync_info; /* Reset the input_boost_min for all CPUs in the system */ pr_debug("Resetting input boost min for all CPUs\n"); for_each_possible_cpu(i) { i_sync_info = &per_cpu(sync_info, i); i_sync_info->input_boost_min = 0; } /* Update policies for all online CPUs */ update_policy_online(); } static void do_input_boost(struct work_struct *work) { unsigned int i; struct cpu_sync *i_sync_info; cancel_delayed_work_sync(&input_boost_rem); /* Set the input_boost_min for all CPUs in the system */ pr_debug("Setting input boost min for all CPUs\n"); for_each_possible_cpu(i) { i_sync_info = &per_cpu(sync_info, i); i_sync_info->input_boost_min = input_boost_freq; } /* Update policies for all online CPUs */ update_policy_online(); queue_delayed_work(cpu_boost_wq, &input_boost_rem, msecs_to_jiffies(input_boost_ms)); } static void cpuboost_input_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { u64 now; if (!input_boost_freq) 
return; now = ktime_to_us(ktime_get()); if (now - last_input_time < MIN_INPUT_INTERVAL) return; if (work_pending(&input_boost_work)) return; queue_work(cpu_boost_wq, &input_boost_work); last_input_time = ktime_to_us(ktime_get()); } static int cpuboost_input_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct input_handle *handle; int error; handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); if (!handle) return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "cpufreq"; error = input_register_handle(handle); if (error) goto err2; error = input_open_device(handle); if (error) goto err1; return 0; err1: input_unregister_handle(handle); err2: kfree(handle); return error; } static void cpuboost_input_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); kfree(handle); } static const struct input_device_id cpuboost_ids[] = { /* multi-touch touchscreen */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_ABS) }, .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = BIT_MASK(ABS_MT_POSITION_X) | BIT_MASK(ABS_MT_POSITION_Y) }, }, /* touchpad */ { .flags = INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, .absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* Keypad */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT, .evbit = { BIT_MASK(EV_KEY) }, }, { }, }; static struct input_handler cpuboost_input_handler = { .event = cpuboost_input_event, .connect = cpuboost_input_connect, .disconnect = cpuboost_input_disconnect, .name = "cpu-boost", .id_table = cpuboost_ids, }; struct kobject *cpu_boost_kobj; static int cpu_boost_init(void) { int cpu, ret; struct cpu_sync *s; cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0); if (!cpu_boost_wq) return -EFAULT; INIT_WORK(&input_boost_work, do_input_boost); 
INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem); for_each_possible_cpu(cpu) { s = &per_cpu(sync_info, cpu); s->cpu = cpu; } cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER); cpu_boost_kobj = kobject_create_and_add("cpu_boost", &cpu_subsys.dev_root->kobj); if (!cpu_boost_kobj) pr_err("Failed to initialize sysfs node for cpu_boost.\n"); ret = sysfs_create_file(cpu_boost_kobj, &input_boost_ms_attr.attr); if (ret) pr_err("Failed to create input_boost_ms node: %d\n", ret); ret = sysfs_create_file(cpu_boost_kobj, &input_boost_freq_attr.attr); if (ret) pr_err("Failed to create input_boost_freq node: %d\n", ret); ret = input_register_handler(&cpuboost_input_handler); return 0; } late_initcall(cpu_boost_init);