Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4bf60464, authored by qctecmdr and committed by Gerrit (code review server)
Browse files

Merge "FROMLIST: sched/fair: Use wake_q length as a hint for wake_wide"

parents 41bb745c aa8b37ba
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -860,6 +860,9 @@ struct task_struct {
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			cpus_mask;
#ifdef CONFIG_SCHED_WALT
	cpumask_t			cpus_requested;
#endif

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
+6 −0
Original line number Diff line number Diff line
@@ -38,6 +38,9 @@
/*
 * Per-caller queue of tasks pending deferred wakeup; drained by wake_up_q().
 * first/lastp implement a singly linked list of wake_q_node entries.
 */
struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
#ifdef CONFIG_SCHED_WALT
	/*
	 * Number of tasks queued (incremented in __wake_q_add()); passed to
	 * try_to_wake_up() as the sibling_count_hint when draining the queue.
	 */
	int count;
#endif
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
@@ -49,6 +52,9 @@ static inline void wake_q_init(struct wake_q_head *head)
{
	/* Empty queue: first holds the tail sentinel, lastp points at first. */
	head->first = WAKE_Q_TAIL;
	head->lastp = &head->first;
#ifdef CONFIG_SCHED_WALT
	/* No tasks queued yet; see head->count use in wake_up_q(). */
	head->count = 0;
#endif
}

static inline bool wake_q_empty(struct wake_q_head *head)
+3 −0
Original line number Diff line number Diff line
@@ -73,6 +73,9 @@ struct task_struct init_task
	.cpus_ptr	= &init_task.cpus_mask,
	.cpus_mask	= CPU_MASK_ALL,
	.nr_cpus_allowed= NR_CPUS,
#ifdef CONFIG_SCHED_WALT
	.cpus_requested	= CPU_MASK_ALL,
#endif
	.mm		= NULL,
	.active_mm	= &init_mm,
	.restart_block	= {
+18 −2
Original line number Diff line number Diff line
@@ -1022,6 +1022,22 @@ void rebuild_sched_domains(void)
	put_online_cpus();
}

/*
 * update_cpus_allowed - impose a cpuset's cpumask on one task.
 * @cs:       cpuset whose allowed mask is being applied
 * @p:        task whose affinity is updated
 * @new_mask: mask the cpuset wants the task restricted to
 *
 * With CONFIG_SCHED_WALT, a task that previously requested a narrower
 * affinity (p->cpus_requested, recorded in sched_setaffinity()) is given
 * that mask back as long as it still fits within the cpuset; only when
 * the request does not fit, or applying it fails, do we fall back to the
 * cpuset-wide @new_mask.
 *
 * Returns 0 on success, otherwise the error from set_cpus_allowed_ptr().
 */
static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
			       const struct cpumask *new_mask)
{
#ifdef CONFIG_SCHED_WALT
	/* Honor the task's own requested affinity when the cpuset permits it. */
	if (cpumask_subset(&p->cpus_requested, cs->cpus_allowed) &&
	    !set_cpus_allowed_ptr(p, &p->cpus_requested))
		return 0;
#endif

	return set_cpus_allowed_ptr(p, new_mask);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -1037,7 +1053,7 @@ static void update_tasks_cpumask(struct cpuset *cs)

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
		update_cpus_allowed(cs, task, cs->effective_cpus);
	css_task_iter_end(&it);
}

@@ -2187,7 +2203,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
		 * can_attach beforehand should guarantee that this doesn't
		 * fail.  TODO: have a better way to handle failure here
		 */
		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
		WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));

		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
		cpuset_update_task_spread_flag(cs, task);
+36 −11
Original line number Diff line number Diff line
@@ -431,6 +431,9 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
	/*
	 * The head is context local, there can be no concurrency.
	 */
#ifdef CONFIG_SCHED_WALT
	head->count++;
#endif
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
@@ -477,6 +480,10 @@ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
		put_task_struct(task);
}

static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
	      int sibling_count_hint);

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;
@@ -491,10 +498,14 @@ void wake_up_q(struct wake_q_head *head)
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * try_to_wake_up() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
#ifdef CONFIG_SCHED_WALT
		try_to_wake_up(task, TASK_NORMAL, 0, head->count);
#else
		try_to_wake_up(task, TASK_NORMAL, 0, 1);
#endif
		put_task_struct(task);
	}
}
@@ -2142,14 +2153,16 @@ static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
		  int sibling_count_hint)
{
	bool allow_isolated = (p->flags & PF_KTHREAD);

	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags,
						     sibling_count_hint);
	else
		cpu = cpumask_any(p->cpus_ptr);

@@ -2544,6 +2557,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 * @sibling_count_hint: A hint at the number of threads that are being woken up
 *                      in this event.
 *
 * If (@state & @p->state) @p->state = TASK_RUNNING.
 *
@@ -2559,7 +2574,8 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 *	   %false otherwise.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
	       int sibling_count_hint)
{
	unsigned long flags;
	int cpu, success = 0;
@@ -2672,7 +2688,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
		atomic_dec(&task_rq(p)->nr_iowait);
	}

	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags,
			     sibling_count_hint);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		psi_ttwu_dequeue(p);
@@ -2723,13 +2740,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
	return try_to_wake_up(p, TASK_NORMAL, 0, 1);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
	return try_to_wake_up(p, state, 0, 1);
}

/*
@@ -3026,7 +3043,7 @@ void wake_up_new_task(struct task_struct *p)
	 * as we're not fully set-up yet.
	 */
	p->recent_used_cpu = task_cpu(p);
	__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
	__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0, 1));
#endif
	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
@@ -3571,7 +3588,7 @@ void sched_exec(void)
		return;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0, 1);
	if (dest_cpu == smp_processor_id())
		goto unlock;

@@ -4500,7 +4517,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
	return try_to_wake_up(curr->private, mode, wake_flags, 1);
}
EXPORT_SYMBOL(default_wake_function);

@@ -5634,6 +5651,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
		retval = -EINVAL;
	}

#ifdef CONFIG_SCHED_WALT
	if (!retval && !(p->flags & PF_KTHREAD))
		cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask);
#endif

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
@@ -6780,6 +6802,9 @@ void __init sched_init_smp(void)
	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
		BUG();
#ifdef CONFIG_SCHED_WALT
	cpumask_copy(&current->cpus_requested, cpu_possible_mask);
#endif
	sched_init_granularity();

	init_sched_rt_class();
Loading