include/linux/timer.h +3 −0

@@ -173,6 +173,9 @@
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
 extern void set_timer_slack(struct timer_list *time, int slack_hz);
+#ifdef CONFIG_SMP
+extern bool check_pending_deferrable_timers(int cpu);
+#endif
 
 #define TIMER_NOT_PINNED	0
 #define TIMER_PINNED		1
kernel/time/tick-sched.c +6 −0

@@ -19,6 +19,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/timer.h>
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>

@@ -809,6 +810,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	now = tick_nohz_start_idle(ts);
 
+#ifdef CONFIG_SMP
+	if (check_pending_deferrable_timers(cpu))
+		raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
kernel/time/timer.c +31 −3

@@ -102,6 +102,7 @@
 static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
 unsigned int sysctl_timer_migration = 1;
 struct tvec_base tvec_base_deferrable;
+static atomic_t deferrable_pending;
 
 void timers_update_migration(bool update_nohz)
 {

@@ -150,10 +151,13 @@ static inline struct tvec_base *get_target_base(struct tvec_base *base,
 
 static inline void __run_deferrable_timers(void)
 {
-	if (smp_processor_id() == tick_do_timer_cpu &&
-	    time_after_eq(jiffies, tvec_base_deferrable.timer_jiffies))
-		__run_timers(&tvec_base_deferrable);
+	if (time_after_eq(jiffies, tvec_base_deferrable.timer_jiffies)) {
+		if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+		    tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
+		    tick_do_timer_cpu == smp_processor_id())
+			__run_timers(&tvec_base_deferrable);
+	}
 }
 
 static inline void init_timer_deferrable_global(void)
 {

@@ -1428,6 +1432,30 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * check_pending_deferrable_timers - Check for unbound deferrable timer expiry.
+ * @cpu - Current CPU
+ *
+ * The function checks whether any global deferrable pending timers
+ * have expired or not. It does not check CPU-bound deferrable
+ * pending timers for expiry.
+ *
+ * The function returns true when a CPU-unbound deferrable timer has expired.
+ */
+bool check_pending_deferrable_timers(int cpu)
+{
+	if (cpu == tick_do_timer_cpu ||
+	    tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
+		if (time_after_eq(jiffies, tvec_base_deferrable.timer_jiffies) &&
+		    !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
+			return true;
+		}
+	}
+	return false;
+}
+#endif
+
 /**
  * get_next_timer_interrupt - return the time (clock mono) of the next timer
  * @basej: base time jiffies
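The handoff in this patch hinges on the single deferrable_pending flag: a CPU entering idle that is eligible to service the global deferrable wheel (it is tick_do_timer_cpu, or no CPU currently owns the tick) claims the expiry with atomic_cmpxchg(0 -> 1) in check_pending_deferrable_timers() and raises TIMER_SOFTIRQ, while __run_deferrable_timers() consumes that claim with atomic_cmpxchg(1 -> 0) before running the expired timers. Below is a minimal user-space sketch of that claim/consume pattern, not the kernel code itself: it uses C11 atomics in place of the kernel's atomic_t, and claim_expiry()/consume_claim() are illustrative names rather than functions from the patch.

/*
 * Sketch of the deferrable_pending claim/consume handoff.
 * C11 atomics stand in for the kernel's atomic_t API; the
 * function names are illustrative, not from the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int deferrable_pending;	/* 0 = no claim, 1 = expiry claimed */

/* Idle-entry side: only the first caller wins the claim (0 -> 1). */
static bool claim_expiry(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&deferrable_pending,
					      &expected, 1);
}

/* Softirq side: run the deferrable wheel only if a claim is outstanding (1 -> 0). */
static bool consume_claim(void)
{
	int expected = 1;

	return atomic_compare_exchange_strong(&deferrable_pending,
					      &expected, 0);
}

int main(void)
{
	/* First idle CPU claims the expiry; a second concurrent attempt loses. */
	printf("first claim:  %d\n", claim_expiry());	/* prints 1 */
	printf("second claim: %d\n", claim_expiry());	/* prints 0 */

	/* The softirq consumes the single outstanding claim exactly once. */
	printf("consume:      %d\n", consume_claim());	/* prints 1 */
	printf("re-consume:   %d\n", consume_claim());	/* prints 0 */
	return 0;
}

The cmpxchg on both sides (rather than a plain flag) appears to be what keeps the handoff one-shot: if several CPUs enter idle in the same jiffy, only one of them raises the softirq for the unbound deferrable timers, and the softirq runs the global wheel at most once per claim, falling back to the tick_do_timer_cpu path otherwise.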