kernel/irq/cpuhotplug.c (+6 −2)

@@ -116,6 +116,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		affinity = &available_cpus;
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		const struct cpumask *default_affinity;
+
 		/*
 		 * If the interrupt is managed, then shut it down and leave
 		 * the affinity untouched.
@@ -125,6 +127,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
 			irq_shutdown_and_deactivate(desc);
 			return false;
 		}
+
+		default_affinity = desc->affinity_hint ? : irq_default_affinity;
 		/*
 		 * The order of preference for selecting a fallback CPU is
 		 *
@@ -134,9 +138,9 @@
 		 */
 		cpumask_andnot(&available_cpus, cpu_online_mask,
			       cpu_isolated_mask);
-		if (cpumask_intersects(&available_cpus, irq_default_affinity))
+		if (cpumask_intersects(&available_cpus, default_affinity))
 			cpumask_and(&available_cpus, &available_cpus,
-				    irq_default_affinity);
+				    default_affinity);
 		else if (cpumask_empty(&available_cpus))
 			affinity = cpu_online_mask;
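Net effect of the cpuhotplug.c hunks: when the last online CPU in an interrupt's affinity mask is unplugged, the fallback mask is now derived from the driver-supplied hint (desc->affinity_hint) when one is set, and only otherwise from the global irq_default_affinity. The expression `x ? : y` is the GNU conditional-with-omitted-middle-operand, yielding x when x is non-NULL. Below is a minimal userspace sketch of the selection order, with plain bitmasks standing in for struct cpumask; the function name and sample masks are hypothetical, not part of the patch.

/*
 * Minimal userspace model of the fallback-affinity selection above.
 * Bitmasks stand in for struct cpumask; pick_fallback() is a
 * hypothetical name, the kernel does this inline in migrate_one_irq().
 */
#include <stdio.h>

static unsigned long pick_fallback(unsigned long online,
				   unsigned long isolated,
				   unsigned long hint,	/* desc->affinity_hint */
				   unsigned long def)	/* irq_default_affinity */
{
	/* GNU "?:" as in the patch: the hint if set, else the default */
	unsigned long default_affinity = hint ? : def;
	unsigned long available = online & ~isolated;

	if (available & default_affinity)	/* cpumask_intersects() */
		available &= default_affinity;	/* cpumask_and()        */
	else if (!available)			/* cpumask_empty()      */
		available = online;		/* last resort: any online CPU */

	return available;
}

int main(void)
{
	/* CPUs 0-3 online, CPU 2 isolated, hint = CPUs 2-3, default = CPU 0 */
	printf("fallback mask: %#lx\n", pick_fallback(0xful, 0x4ul, 0xcul, 0x1ul));
	return 0;
}

With these sample masks the sketch prints fallback mask: 0x8, i.e. CPU 3: the driver hint wins over the system default whenever it still intersects the online, non-isolated CPUs.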
kernel/sched/cpupri.c (+2 −0)

@@ -122,6 +122,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (lowest_mask) {
 			cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+			cpumask_andnot(lowest_mask, lowest_mask,
+				       cpu_isolated_mask);
 			if (drop_nopreempts)
 				drop_nopreempt_cpus(lowest_mask);
 
 			/*
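The cpupri.c hunk has a one-line purpose: isolated CPUs must never be offered as candidates when the RT scheduler searches for a lower-priority CPU to push a task onto, even if they currently sit at the lowest priority. A toy model of the two mask operations (bitmasks in place of cpumasks; the sample values are assumed for illustration):

/*
 * Toy model of the lowest_mask filtering in cpupri_find() above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long task_allowed = 0xful;	/* p->cpus_ptr: CPUs 0-3        */
	unsigned long vec_mask     = 0x6ul;	/* vec->mask: CPUs 1-2 at low prio */
	unsigned long isolated     = 0x4ul;	/* cpu_isolated_mask: CPU 2     */

	unsigned long lowest = task_allowed & vec_mask;	/* cpumask_and()    */
	lowest &= ~isolated;				/* cpumask_andnot() */

	printf("lowest_mask: %#lx\n", lowest);	/* 0x2: only CPU 1 remains */
	return 0;
}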
kernel/softirq.c (+22 −16)

@@ -85,16 +85,12 @@ static void wakeup_softirqd(void)
 
 /*
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
- * unless we're doing some of the synchronous softirqs.
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness.
  */
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
-static bool ksoftirqd_running(unsigned long pending)
+static bool ksoftirqd_running(void)
 {
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
-	if (pending & SOFTIRQ_NOW_MASK)
-		return false;
 	return tsk && (tsk->state == TASK_RUNNING) &&
 		!__kthread_should_park(tsk);
 }
@@ -253,8 +249,16 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
-#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
+#define softirq_deferred_for_rt(pending)		\
+({							\
+	__u32 deferred = 0;				\
+	if (cpupri_check_rt()) {			\
+		deferred = pending & LONG_SOFTIRQ_MASK;	\
+		pending &= ~LONG_SOFTIRQ_MASK;		\
+	}						\
+	deferred;					\
+})
+
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -262,6 +266,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	struct softirq_action *h;
 	bool in_hardirq;
+	__u32 deferred;
 	__u32 pending;
 	int softirq_bit;
@@ -273,14 +278,14 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(pending);
 	account_irq_enter_time(current);
 
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 	in_hardirq = lockdep_softirq_start();
 
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
+	set_softirq_pending(deferred);
 	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
@@ -317,15 +322,16 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	local_irq_disable();
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(pending);
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
-		    !defer_for_rt() &&
 		    --max_restart)
 			goto restart;
-
-		wakeup_softirqd();
 	}
+
+	if (pending | deferred)
+		wakeup_softirqd();
 
 	lockdep_softirq_end(in_hardirq);
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
@@ -345,7 +351,7 @@ asmlinkage __visible void do_softirq(void)
 	pending = local_softirq_pending();
 
-	if (pending && !ksoftirqd_running(pending))
+	if (pending && !ksoftirqd_running())
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);
@@ -372,10 +378,10 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
-	if (ksoftirqd_running(local_softirq_pending()))
+	if (ksoftirqd_running())
 		return;
 
-	if (!force_irqthreads && !defer_for_rt()) {
+	if (!force_irqthreads) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
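The centerpiece of the softirq.c change is softirq_deferred_for_rt(): a GNU statement-expression macro that returns the long-running softirq vectors to defer and, through macro argument substitution, clears those bits from the caller's `pending` in place. __do_softirq() then re-arms only the deferred bits via set_softirq_pending(deferred), so they stay pending for ksoftirqd (woken whenever `pending | deferred` is non-zero) while short vectors still run inline. The macro below is copied from the patch; the cpupri_check_rt() stub, the LONG_SOFTIRQ_MASK value, and the __u32 typedef are assumptions for a standalone userspace build (statement expressions require gcc or clang):

/*
 * Standalone check of the splitting behavior of softirq_deferred_for_rt().
 * rt_busy stands in for cpupri_check_rt(), which in the patch reports
 * whether the current CPU is running an RT task.
 */
#include <stdio.h>

typedef unsigned int __u32;		/* stub for the kernel type */

static int rt_busy;			/* stub for cpupri_check_rt() */
#define cpupri_check_rt()	(rt_busy)
#define LONG_SOFTIRQ_MASK	0x30u	/* assumed "long-running" vectors */

/* Copied from the patch: returns deferred bits, clears them from 'pending'. */
#define softirq_deferred_for_rt(pending)		\
({							\
	__u32 deferred = 0;				\
	if (cpupri_check_rt()) {			\
		deferred = pending & LONG_SOFTIRQ_MASK;	\
		pending &= ~LONG_SOFTIRQ_MASK;		\
	}						\
	deferred;					\
})

int main(void)
{
	__u32 pending = 0x33u;		/* mix of short and long vectors */
	__u32 deferred;

	rt_busy = 1;			/* RT task running on this CPU */
	deferred = softirq_deferred_for_rt(pending);
	/* Long vectors are stashed for ksoftirqd, short ones run inline. */
	printf("pending=%#x deferred=%#x\n", pending, deferred); /* 0x3 0x30 */

	rt_busy = 0;			/* no RT pressure: nothing deferred */
	pending = 0x33u;
	deferred = softirq_deferred_for_rt(pending);
	printf("pending=%#x deferred=%#x\n", pending, deferred); /* 0x33 0 */
	return 0;
}

Note the design choice this replaces: the old defer_for_rt() deferred *all* pending softirqs (by bailing out of the restart loop) once any long vector was pending under RT pressure, whereas the new macro splits the mask so only the long vectors are pushed to ksoftirqd.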