Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5f4b9a0a authored by qctecmdr Service, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "Revert "softirq: Let ksoftirqd do its job""

parents 151d391b f646d63f
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -1565,6 +1565,7 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern bool cpupri_check_rt(void);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -1575,6 +1576,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
		return -EINVAL;
	return 0;
}
/*
 * !CONFIG_SMP stub: there is no cpupri bookkeeping on UP kernels,
 * so report that no RT task is running on this CPU.
 */
static inline bool cpupri_check_rt(void)
{
	return false;
}
#endif

#ifndef cpu_relax_yield
+11 −0
Original line number Diff line number Diff line
@@ -277,3 +277,14 @@ void cpupri_cleanup(struct cpupri *cp)
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}

/*
 * cpupri_check_rt - check if CPU has a RT task
 * should be called from rcu-sched read section.
 */
bool cpupri_check_rt(void)
{
	int cpu = raw_smp_processor_id();

	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
}
+5 −16
Original line number Diff line number Diff line
@@ -84,17 +84,6 @@ static void wakeup_softirqd(void)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -245,6 +234,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

/*
 * Defer softirq processing when a "long" softirq (one in
 * LONG_SOFTIRQ_MASK) is pending while the current CPU is running an
 * RT task (cpupri_check_rt()), to keep RT latency down — presumably
 * these softirq classes can run long; confirm against LONG_SOFTIRQ_MASK's
 * definition.
 */
#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -308,6 +299,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
			goto restart;

@@ -333,7 +325,7 @@ asmlinkage __visible void do_softirq(void)

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
@@ -360,10 +352,7 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
	if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if