Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e8fa4500 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "sched: avoid migrating when softint on tgt cpu should be short"

parents 75cfe59d 20bcc472
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -473,6 +473,12 @@ enum
};

/* Softirqs an idle CPU may still run; everything except RCU_SOFTIRQ. */
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
/* Softirqs where the handling might be long: */
#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
			   (1 << NET_RX_SOFTIRQ)       | \
			   (1 << BLOCK_SOFTIRQ)        | \
			   (1 << IRQ_POLL_SOFTIRQ)     | \
			   (1 << TASKLET_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
@@ -508,6 +514,7 @@ extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
/* Mask of softirqs currently being handled on each CPU; defined in kernel/softirq.c. */
DECLARE_PER_CPU(__u32, active_softirqs);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
+8 −4
Original line number Diff line number Diff line
@@ -1453,16 +1453,20 @@ static int find_lowest_rq(struct task_struct *task);

/*
 * Return whether the task on the given cpu is currently non-preemptible
 * while handling a potentially long softint, or if the task is likely
 * to block preemptions soon because it is a ksoftirq thread that is
 * handling slow softints.
 */
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
	/*
	 * Combine softirqs already being handled with those still pending;
	 * active_softirqs is read without synchronization, so this is an
	 * approximate answer (acceptable here -- see its definition).
	 */
	__u32 softirqs = per_cpu(active_softirqs, cpu) |
			 __IRQ_STAT(cpu, __softirq_pending);
	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);

	/*
	 * Only long-running softirq classes count; short ones (e.g. timers)
	 * should not block migration. The task blocks preemption if it is
	 * the CPU's ksoftirqd thread or is itself inside softirq context.
	 */
	return ((softirqs & LONG_SOFTIRQ_MASK) &&
		(task == cpu_ksoftirqd ||
		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}

static int
+9 −0
Original line number Diff line number Diff line
@@ -57,6 +57,13 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp

/* Per-CPU ksoftirqd kernel thread handling deferred softirq work. */
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * active_softirqs -- per cpu, a mask of softirqs that are being handled,
 * with the expectation that approximate answers are acceptable and therefore
 * no synchronization.
 */
DEFINE_PER_CPU(__u32, active_softirqs);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -264,6 +271,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	__this_cpu_write(active_softirqs, pending);

	local_irq_enable();

@@ -293,6 +301,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
		pending >>= softirq_bit;
	}

	__this_cpu_write(active_softirqs, 0);
	rcu_bh_qs();
	local_irq_disable();