include/linux/timer.h  +3 −0

@@ -161,6 +161,9 @@ extern int del_timer(struct timer_list * timer);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 extern int timer_reduce(struct timer_list *timer, unsigned long expires);
+#ifdef CONFIG_SMP
+extern bool check_pending_deferrable_timers(int cpu);
+#endif
 
 /*
  * The jiffies value which is added to now, when there is no timer
kernel/time/tick-sched.c  +6 −0

@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
+#include <linux/timer.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
 
@@ -920,6 +921,11 @@ static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
 	ktime_t expires;
 	int cpu = smp_processor_id();
 
+#ifdef CONFIG_SMP
+	if (check_pending_deferrable_timers(cpu))
+		raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
 	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
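For context (not part of the patch): the timers this idle-path check is meant to catch are ordinary wheel timers armed with TIMER_DEFERRABLE and no CPU pinning, which this series queues on the global timer_base_deferrable (the base itself is introduced in an earlier change not shown here). The sketch below is a hypothetical example module, not code from the series; the module name, callback, and period are invented, but timer_setup(), TIMER_DEFERRABLE, mod_timer(), and del_timer_sync() are the standard kernel APIs.

/* Hypothetical demo module: arms one CPU-unbound deferrable timer. */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *t)
{
	pr_info("deferrable demo timer fired at jiffies=%lu\n", jiffies);
	/* Re-arm one second out; still deferrable, still not pinned to a CPU. */
	mod_timer(&demo_timer, jiffies + HZ);
}

static int __init demo_init(void)
{
	/* TIMER_DEFERRABLE: expiry may be delayed until the CPU wakes anyway. */
	timer_setup(&demo_timer, demo_timer_fn, TIMER_DEFERRABLE);
	mod_timer(&demo_timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");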
kernel/time/timer.c  +31 −1

@@ -208,6 +208,7 @@ struct timer_base {
 
 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 struct timer_base timer_base_deferrable;
+static atomic_t deferrable_pending;
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -1526,6 +1527,31 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * check_pending_deferrable_timers - Check for unbound deferrable timer expiry
+ * @cpu - Current CPU
+ *
+ * The function checks whether any global deferrable pending timers
+ * have expired. It does not check the expiry of deferrable timers
+ * bound to a particular CPU.
+ *
+ * Returns true when a CPU-unbound deferrable timer has expired.
+ */
+bool check_pending_deferrable_timers(int cpu)
+{
+	if (cpu == tick_do_timer_cpu ||
+	    tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
+		if (time_after_eq(jiffies, timer_base_deferrable.clk) &&
+		    !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
+			return true;
+		}
+	}
+
+	return false;
+}
+#endif
+
 /**
  * get_next_timer_interrupt - return the time (clock mono) of the next timer
  * @basej:	base time jiffies
@@ -1712,9 +1738,13 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 	__run_timers(base);
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
-		__run_timers(&timer_base_deferrable);
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 	}
+
+	if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+	     tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
+	    tick_do_timer_cpu == smp_processor_id())
+		__run_timers(&timer_base_deferrable);
 }
 
 /*
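The atomic_cmpxchg() pair above forms a simple claim/release hand-off: check_pending_deferrable_timers() claims the work by flipping deferrable_pending from 0 to 1, so only one idling CPU raises TIMER_SOFTIRQ for the global base while there is no tick_do_timer CPU, and run_timer_softirq() releases it by flipping it back from 1 to 0 before running timer_base_deferrable. The following standalone user-space sketch (C11 atomics, not kernel code) demonstrates that flag pattern in isolation; claim_pending() and consume_pending() are invented names for this illustration.

/*
 * User-space sketch of the deferrable_pending hand-off:
 * the "idle" side claims with a 0 -> 1 cmpxchg, the softirq side
 * releases with a 1 -> 0 cmpxchg, so the global deferrable base is
 * raised and run once per pending round.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int deferrable_pending;

/* Models check_pending_deferrable_timers(): succeeds for one caller only. */
static bool claim_pending(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&deferrable_pending, &expected, 1);
}

/* Models the run_timer_softirq() side: consumes the claim exactly once. */
static bool consume_pending(void)
{
	int expected = 1;
	return atomic_compare_exchange_strong(&deferrable_pending, &expected, 0);
}

int main(void)
{
	/* Two CPUs notice an expired unbound deferrable timer; only one claims. */
	printf("cpu0 claims: %d\n", claim_pending());	/* 1 */
	printf("cpu1 claims: %d\n", claim_pending());	/* 0: already pending */

	/* The softirq consumes the claim once; a second pass skips the base. */
	printf("softirq runs deferrable base: %d\n", consume_pending());	/* 1 */
	printf("second softirq skips it:      %d\n", consume_pending());	/* 0 */
	return 0;
}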