Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db6940d5 authored by Pavankumar Kondeti's avatar Pavankumar Kondeti Committed by Gerrit - the friendly Code Review server
Browse files

softirq: defer softirq processing to ksoftirqd if CPU is busy with RT



Defer softirq processing to ksoftirqd if an RT task is running
or queued on the current CPU. This complements the RT task placement
algorithm, which tries to find a CPU that is not currently busy with
softirqs.

Currently, only the NET_TX, NET_RX, BLOCK and TASKLET softirqs are
deferred, as they can potentially run for a long time.

Change-Id: Id7665244af6bbd5a96d9e591cf26154e9eaa860c
Signed-off-by: default avatarPavankumar Kondeti <pkondeti@codeaurora.org>
[satyap@codeaurora.org: trivial merge conflict resolution.]
Signed-off-by: default avatarSatya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 68efde1a
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -1721,6 +1721,7 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern bool cpupri_check_rt(void);
#else
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
{
@@ -1731,6 +1732,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
		return -EINVAL;
		return -EINVAL;
	return 0;
	return 0;
}
}
/*
 * !CONFIG_SMP stub: without SMP there is no cpupri tracking, so never
 * report an RT task on the current CPU (softirqs are never deferred).
 */
static inline bool cpupri_check_rt(void)
{
	return false;
}
#endif
#endif


extern int yield_to(struct task_struct *p, bool preempt);
extern int yield_to(struct task_struct *p, bool preempt);
+11 −0
Original line number Original line Diff line number Diff line
@@ -266,3 +266,14 @@ void cpupri_cleanup(struct cpupri *cp)
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
}

/*
 * cpupri_check_rt - check if CPU has a RT task
 * should be called from rcu-sched read section.
 */
bool cpupri_check_rt(void)
{
	int cpu = raw_smp_processor_id();

	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
}
+4 −1
Original line number Original line Diff line number Diff line
@@ -253,6 +253,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
#endif


/* Pending softirqs that may run long (NET_TX/NET_RX/BLOCK/TASKLET per LONG_SOFTIRQ_MASK). */
#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
/* Defer to ksoftirqd: a long-running softirq is pending while this CPU has RT work (see cpupri_check_rt()). */
#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
asmlinkage __visible void __softirq_entry __do_softirq(void)
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -317,6 +319,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	pending = local_softirq_pending();
	pending = local_softirq_pending();
	if (pending) {
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
		    --max_restart)
			goto restart;
			goto restart;


@@ -372,7 +375,7 @@ static inline void invoke_softirq(void)
	if (ksoftirqd_running(local_softirq_pending()))
	if (ksoftirqd_running(local_softirq_pending()))
		return;
		return;


	if (!force_irqthreads) {
	if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		/*
		 * We can safely execute softirq on the current stack if
		 * We can safely execute softirq on the current stack if