Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 298e04a2 authored by Björn Davidsson, committed by Satya Durga Srinivasu Prabhala
Browse files

softirq: Don't defer all softirq during RT task



Since 'commit f332a9d53e33 ("softirq: defer softirq processing to
ksoftirqd if CPU is busy with RT")', all softirqs are deferred
to ksoftirqd if one of the potentially long softirqs is pending.
This can significantly delay processing of tasklets and timers,
which are frequently designed to be running at high priority.

Defer only the potentially slow softirqs to ksoftirqd.

Change-Id: I7a2ef7c59749ccf086066c59962ff326786da6f4
Signed-off-by: Björn Davidsson <bjorn.davidsson@sony.com>
[clingutla@codeaurora.org:- Renamed local variables and refactored
 deferred logic to macro for avoiding potential deadlock.]
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent f646d63f
Loading
Loading
Loading
Loading
+18 −8
Original line number Diff line number Diff line
@@ -234,8 +234,16 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
/*
 * softirq_deferred_for_rt(pending) - split off softirqs to be deferred
 * to ksoftirqd.
 *
 * When cpupri_check_rt() returns true (presumably: this CPU is currently
 * busy with an RT task — confirm against cpupri_check_rt()'s definition),
 * the "potentially long" softirq bits (LONG_SOFTIRQ_MASK) are removed
 * from @pending and returned as the deferred set; the remaining bits in
 * @pending are still handled inline. Otherwise the result is 0 and
 * @pending is left untouched.
 *
 * NOTE: this is a GNU statement-expression macro that MODIFIES its
 * @pending argument in place and may evaluate it more than once —
 * callers must pass a plain lvalue with no side effects.
 */
#define softirq_deferred_for_rt(pending)		\
({							\
	__u32 deferred = 0;				\
	if (cpupri_check_rt()) {			\
		deferred = pending & LONG_SOFTIRQ_MASK; \
		pending &= ~LONG_SOFTIRQ_MASK;		\
	}						\
	deferred;					\
})

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -243,6 +251,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 deferred;
	__u32 pending;
	int softirq_bit;

@@ -254,14 +263,14 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	deferred = softirq_deferred_for_rt(pending);
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	set_softirq_pending(deferred);
	__this_cpu_write(active_softirqs, pending);

	local_irq_enable();
@@ -297,15 +306,16 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	local_irq_disable();

	pending = local_softirq_pending();
	deferred = softirq_deferred_for_rt(pending);

	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	if (pending | deferred)
		wakeup_softirqd();
	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
@@ -352,7 +362,7 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
	if (!force_irqthreads && !defer_for_rt()) {
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if