Loading kernel/sched/core_ctl.h→include/linux/sched/core_ctl.h +5 −2 Original line number Diff line number Diff line Loading @@ -16,9 +16,12 @@ #ifdef CONFIG_SCHED_CORE_CTL void core_ctl_check(u64 wallclock); void core_ctl_set_boost(bool boost); int core_ctl_set_boost(bool boost); #else static inline void core_ctl_check(u64 wallclock) {} static inline void core_ctl_set_boost(bool boost) {} static inline int core_ctl_set_boost(bool boost) { return 0; } #endif #endif include/trace/events/sched.h +15 −0 Original line number Diff line number Diff line Loading @@ -1323,6 +1323,21 @@ TRACE_EVENT(core_ctl_set_busy, __entry->is_busy) ); TRACE_EVENT(core_ctl_set_boost, TP_PROTO(u32 refcount, s32 ret), TP_ARGS(refcount, ret), TP_STRUCT__entry( __field(u32, refcount) __field(s32, ret) ), TP_fast_assign( __entry->refcount = refcount; __entry->ret = ret; ), TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret) ); /** * sched_isolate - called when cores are isolated/unisolated * Loading kernel/sched/core.c +1 −1 Original line number Diff line number Diff line Loading @@ -75,6 +75,7 @@ #include <linux/context_tracking.h> #include <linux/compiler.h> #include <linux/irq.h> #include <linux/sched/core_ctl.h> #include <asm/switch_to.h> #include <asm/tlb.h> Loading @@ -85,7 +86,6 @@ #endif #include "sched.h" #include "core_ctl.h" #include "../workqueue_internal.h" #include "../smpboot.h" Loading kernel/sched/core_ctl.c +28 −5 Original line number Diff line number Diff line Loading @@ -45,7 +45,7 @@ struct cluster_data { bool nrrun_changed; struct task_struct *core_ctl_thread; unsigned int first_cpu; bool boost; unsigned int boost; struct kobject kobj; }; Loading Loading @@ -652,17 +652,40 @@ static bool do_check(u64 wallclock) return do_check; } void core_ctl_set_boost(bool boost) int core_ctl_set_boost(bool boost) { unsigned int index = 0; struct cluster_data *cluster; unsigned long flags; int ret = 0; bool boost_state_changed = false; spin_lock_irqsave(&state_lock, 
flags); for_each_cluster(cluster, index) { if (cluster->is_big_cluster && cluster->boost != boost) { cluster->boost = boost; apply_need(cluster); if (cluster->is_big_cluster) { if (boost) { boost_state_changed = !cluster->boost; ++cluster->boost; } else { if (!cluster->boost) { pr_err("Error turning off boost. Boost already turned off\n"); ret = -EINVAL; } else { --cluster->boost; boost_state_changed = !cluster->boost; } } break; } } spin_unlock_irqrestore(&state_lock, flags); if (boost_state_changed) apply_need(cluster); trace_core_ctl_set_boost(cluster->boost, ret); return ret; } void core_ctl_check(u64 wallclock) Loading kernel/sched/hmp.c +1 −1 Original line number Diff line number Diff line Loading @@ -18,9 +18,9 @@ #include <linux/list_sort.h> #include <linux/syscore_ops.h> #include <linux/of.h> #include <linux/sched/core_ctl.h> #include "sched.h" #include "core_ctl.h" #include <trace/events/sched.h> Loading Loading
kernel/sched/core_ctl.h→include/linux/sched/core_ctl.h +5 −2 Original line number Diff line number Diff line Loading @@ -16,9 +16,12 @@ #ifdef CONFIG_SCHED_CORE_CTL void core_ctl_check(u64 wallclock); void core_ctl_set_boost(bool boost); int core_ctl_set_boost(bool boost); #else static inline void core_ctl_check(u64 wallclock) {} static inline void core_ctl_set_boost(bool boost) {} static inline int core_ctl_set_boost(bool boost) { return 0; } #endif #endif
/*
 * core_ctl_set_boost - traces each boost/unboost request.
 * @refcount: boost reference count of the big cluster after the update.
 * @ret: return value of core_ctl_set_boost() (0 or -EINVAL on over-unboost).
 */
TRACE_EVENT(core_ctl_set_boost,

	TP_PROTO(u32 refcount, s32 ret),

	TP_ARGS(refcount, ret),

	TP_STRUCT__entry(
		__field(u32, refcount)
		__field(s32, ret)
	),

	TP_fast_assign(
		__entry->refcount = refcount;
		__entry->ret = ret;
	),

	TP_printk("refcount=%u, ret=%d",
		  __entry->refcount,
		  __entry->ret)
);
kernel/sched/core.c +1 −1 Original line number Diff line number Diff line Loading @@ -75,6 +75,7 @@ #include <linux/context_tracking.h> #include <linux/compiler.h> #include <linux/irq.h> #include <linux/sched/core_ctl.h> #include <asm/switch_to.h> #include <asm/tlb.h> Loading @@ -85,7 +86,6 @@ #endif #include "sched.h" #include "core_ctl.h" #include "../workqueue_internal.h" #include "../smpboot.h" Loading
kernel/sched/core_ctl.c +28 −5 Original line number Diff line number Diff line Loading @@ -45,7 +45,7 @@ struct cluster_data { bool nrrun_changed; struct task_struct *core_ctl_thread; unsigned int first_cpu; bool boost; unsigned int boost; struct kobject kobj; }; Loading Loading @@ -652,17 +652,40 @@ static bool do_check(u64 wallclock) return do_check; } void core_ctl_set_boost(bool boost) int core_ctl_set_boost(bool boost) { unsigned int index = 0; struct cluster_data *cluster; unsigned long flags; int ret = 0; bool boost_state_changed = false; spin_lock_irqsave(&state_lock, flags); for_each_cluster(cluster, index) { if (cluster->is_big_cluster && cluster->boost != boost) { cluster->boost = boost; apply_need(cluster); if (cluster->is_big_cluster) { if (boost) { boost_state_changed = !cluster->boost; ++cluster->boost; } else { if (!cluster->boost) { pr_err("Error turning off boost. Boost already turned off\n"); ret = -EINVAL; } else { --cluster->boost; boost_state_changed = !cluster->boost; } } break; } } spin_unlock_irqrestore(&state_lock, flags); if (boost_state_changed) apply_need(cluster); trace_core_ctl_set_boost(cluster->boost, ret); return ret; } void core_ctl_check(u64 wallclock) Loading
kernel/sched/hmp.c +1 −1 Original line number Diff line number Diff line Loading @@ -18,9 +18,9 @@ #include <linux/list_sort.h> #include <linux/syscore_ops.h> #include <linux/of.h> #include <linux/sched/core_ctl.h> #include "sched.h" #include "core_ctl.h" #include <trace/events/sched.h> Loading