Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bb2e97c0 authored by Pavankumar Kondeti's avatar Pavankumar Kondeti Committed by Satya Durga Srinivasu Prabhala
Browse files

softirq: defer softirq processing to ksoftirqd if CPU is busy with RT



Defer softirq processing to ksoftirqd if an RT task is running
or queued on the current CPU. This complements the RT task placement
algorithm which tries to find a CPU that is not currently busy with
softirqs.

Currently only the NET_TX, NET_RX, BLOCK and TASKLET softirqs are
deferred, as they can potentially run for a long time.

Change-Id: Id7665244af6bbd5a96d9e591cf26154e9eaa860c
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: trivial merge conflict resolution.]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent d8c75da0
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -1565,6 +1565,7 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern bool cpupri_check_rt(void);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -1575,6 +1576,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
		return -EINVAL;
	return 0;
}
/*
 * !CONFIG_SMP stub (this is the #else branch of the CONFIG_SMP block
 * above): without SMP there is no cpupri bookkeeping, so report that no
 * RT task is busy on this CPU and let softirq processing proceed inline.
 */
static inline bool cpupri_check_rt(void)
{
	return false;
}
#endif

#ifndef cpu_relax_yield
+11 −0
Original line number Diff line number Diff line
@@ -277,3 +277,14 @@ void cpupri_cleanup(struct cpupri *cp)
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}

/*
 * cpupri_check_rt - report whether the local CPU is busy with an RT task.
 *
 * Looks up the cpupri priority recorded for the current CPU's root domain
 * and returns true when it is above CPUPRI_NORMAL, i.e. an RT task is
 * running or queued here.  Must be called from an rcu-sched read section.
 */
bool cpupri_check_rt(void)
{
	int this_cpu = raw_smp_processor_id();

	return cpu_rq(this_cpu)->rd->cpupri.cpu_to_pri[this_cpu] > CPUPRI_NORMAL;
}
+4 −1
Original line number Diff line number Diff line
@@ -245,6 +245,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -308,6 +310,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
			goto restart;

@@ -363,7 +366,7 @@ static inline void invoke_softirq(void)
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
	if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if