
Commit 467386fb authored by Ingo Molnar

Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 525628c7 ecf7d01c
+30 −6
@@ -1957,6 +1957,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		goto stat;
 
 #ifdef CONFIG_SMP
+	/*
+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+	 * possible to, falsely, observe p->on_cpu == 0.
+	 *
+	 * One must be running (->on_cpu == 1) in order to remove oneself
+	 * from the runqueue.
+	 *
+	 *  [S] ->on_cpu = 1;	[L] ->on_rq
+	 *      UNLOCK rq->lock
+	 *			RMB
+	 *      LOCK   rq->lock
+	 *  [S] ->on_rq = 0;    [L] ->on_cpu
+	 *
+	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+	 * from the consecutive calls to schedule(); the first switching to our
+	 * task, the second putting it to sleep.
+	 */
+	smp_rmb();
+
 	/*
 	 * If the owning (remote) cpu is still in the middle of schedule() with
 	 * this task as prev, wait until its done referencing the task.
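The [S]/[L] diagram above reads as a two-CPU litmus test. Here is a minimal userspace C11 model of it, offered as a sketch only: sleeper(), waker() and the two flags are hypothetical stand-ins for the scheduler paths, the seq_cst fence stands in for the full barrier implied by the rq->lock UNLOCK+LOCK, and the acquire fence stands in for the smp_rmb() added here. With both fences in place, a waker that observes on_rq == 0 can no longer also observe a stale on_cpu == 0.

/* model.c - userspace model of the ->on_rq / ->on_cpu ordering above.
 * Build: cc -std=c11 -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int on_rq  = 1;	/* task starts queued, not running */
static atomic_int on_cpu = 0;

static void *sleeper(void *arg)	/* the schedule()-out side */
{
	atomic_store_explicit(&on_cpu, 1, memory_order_relaxed); /* [S] ->on_cpu = 1 */
	atomic_thread_fence(memory_order_seq_cst);               /* UNLOCK + LOCK    */
	atomic_store_explicit(&on_rq, 0, memory_order_relaxed);  /* [S] ->on_rq = 0  */
	return NULL;
}

static void *waker(void *arg)	/* the try_to_wake_up() side */
{
	int rq = atomic_load_explicit(&on_rq, memory_order_relaxed);   /* [L] ->on_rq  */
	atomic_thread_fence(memory_order_acquire);                     /* RMB          */
	int cpu = atomic_load_explicit(&on_cpu, memory_order_relaxed); /* [L] ->on_cpu */

	/* The forbidden outcome: dequeued, yet apparently never ran.
	 * Without the read fence the on_cpu load may be satisfied early
	 * and return the stale 0. */
	if (rq == 0 && cpu == 0)
		printf("broken: on_rq == 0 && on_cpu == 0\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, sleeper, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}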
@@ -1964,7 +1983,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	while (p->on_cpu)
 		cpu_relax();
 	/*
-	 * Pairs with the smp_wmb() in finish_lock_switch().
+	 * Combined with the control dependency above, we have an effective
+	 * smp_load_acquire() without the need for full barriers.
+	 *
+	 * Pairs with the smp_store_release() in finish_lock_switch().
+	 *
+	 * This ensures that tasks getting woken will be fully ordered against
+	 * their previous state and preserve Program Order.
 	 */
 	smp_rmb();
 
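In C11 terms, the claim in the new comment is that a plain read in a loop plus a read fence behaves like a load-acquire: the loop cannot exit until a 0 is read, and the fence then orders everything that follows after that read. A sketch of the idiom, with a hypothetical helper name:

#include <stdatomic.h>

/* Spin until *v reads 0, with acquire semantics: the moral equivalent of
 * smp_load_acquire() built from a control dependency plus smp_rmb(). */
static inline void spin_until_clear_acquire(atomic_int *v)
{
	while (atomic_load_explicit(v, memory_order_relaxed))
		;	/* control dependency: nothing below runs before a 0 is read */
	atomic_thread_fence(memory_order_acquire);	/* the smp_rmb() */
}

This is exactly what try_to_wake_up() does to p->on_cpu, and per the comment it gets acquire ordering without the full barrier that smp_load_acquire() would cost on some architectures.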
@@ -2050,7 +2075,6 @@ static void try_to_wake_up_local(struct task_struct *p)
  */
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
 	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -5858,13 +5882,13 @@ static int init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
 		goto free_online;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_dlo_mask;
 
 	init_dl_bw(&rd->dl_bw);
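The alloc to zalloc switch matters with CONFIG_CPUMASK_OFFSTACK=y: cpumask_var_t is then a pointer to a separately allocated buffer, so the memset() of *rd above zeroes only the pointers, never the mask bits behind them. A tiny userspace model of the distinction (struct and sizes invented for illustration):

#include <stdlib.h>
#include <string.h>

typedef unsigned long *cpumask_var_t;	/* the OFFSTACK=y representation */
struct rd { cpumask_var_t span; };

int main(void)
{
	struct rd rd;
	size_t mask_bytes = 128 / 8;		/* say NR_CPUS == 128 */

	memset(&rd, 0, sizeof(rd));		/* zeroes the pointer only     */
	rd.span = malloc(mask_bytes);		/* alloc_...:  bits are garbage */
	free(rd.span);
	rd.span = calloc(1, mask_bytes);	/* zalloc_...: bits start at 0  */
	free(rd.span);
	return 0;
}

With CONFIG_CPUMASK_OFFSTACK=n the mask is embedded in the struct and the memset() already covers it, which is why the uninitialized bits only bite on the off-stack configuration.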
+3 −0
Original line number Diff line number Diff line
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
 	unsigned int seq;
 	cputime_t gtime;
 
+	if (!context_tracking_is_enabled())
+		return t->gtime;
+
 	do {
 		seq = read_seqbegin(&t->vtime_seqlock);
 
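The added early return skips the vtime seqlock entirely: with context tracking disabled there is no vtime bookkeeping to fold into the result, so t->gtime can be returned directly. For reference, the retry loop below it is the usual seqcount read pattern, modelled here in userspace C11 with illustrative helper names (not the kernel API):

#include <stdatomic.h>

static atomic_uint seq;			/* models t->vtime_seqlock's count */
static unsigned long long gtime_val;	/* models the protected gtime data */

static unsigned read_begin(void)	/* read_seqbegin() */
{
	unsigned s;
	while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
		;			/* odd count: writer in progress */
	return s;
}

static int read_retry(unsigned s)	/* read_seqretry() */
{
	atomic_thread_fence(memory_order_acquire);	/* data reads stay above */
	return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static unsigned long long task_gtime_model(void)
{
	unsigned s;
	unsigned long long g;

	do {
		s = read_begin();
		g = gtime_val;		/* the protected read */
	} while (read_retry(s));	/* retry if a writer intervened */
	return g;
}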
+3 −0
Original line number Diff line number Diff line
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
 	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
 	smp_store_release(&prev->on_cpu, 0);
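Read this release store together with the acquire side in try_to_wake_up() above: a release store orders every earlier access, loads included, before the store itself, which is why the prev->state load called out in the new comment is covered. Sketched in C11 terms (variable names are illustrative):

#include <stdatomic.h>

static atomic_int on_cpu = 1;
static int prev_state;

static void finish_lock_switch_side(void)
{
	int st = prev_state;	/* models the prev->state load */
	(void)st;
	/* release: the load above cannot be reordered past this store */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);
}

static void try_to_wake_up_side(void)
{
	while (atomic_load_explicit(&on_cpu, memory_order_relaxed))
		;					/* control dependency */
	atomic_thread_fence(memory_order_acquire);	/* the smp_rmb()      */
	/* everything the other side did before its release store,
	 * including the prev_state load, is now ordered before us */
}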
+8 −8
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
 
 __sched int bit_wait(struct wait_bit_key *word)
 {
-	if (signal_pending_state(current->state, current))
-		return 1;
 	schedule();
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
 __sched int bit_wait_io(struct wait_bit_key *word)
 {
-	if (signal_pending_state(current->state, current))
-		return 1;
 	io_schedule();
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	if (signal_pending_state(current->state, current))
-		return 1;
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	schedule_timeout(word->timeout - now);
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	if (signal_pending_state(current->state, current))
-		return 1;
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	io_schedule_timeout(word->timeout - now);
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
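The wait.c change flips the signal handling in all four helpers: the removed variant tested signal_pending_state(current->state, current) before sleeping, the new one unconditionally tests signal_pending() after schedule() and returns -EINTR. The pre-sleep check is fragile because a racing wakeup may already have set current->state back to TASK_RUNNING by the time the helper runs, and signal_pending_state() then reports no pending signal regardless, so the wait loop never reacts to the signal. A compressed model of that predicate (simplified and illustrative: the real kernel version also handles TASK_WAKEKILL and fatal signals):

#include <stdbool.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2

/* simplified shape of signal_pending_state(): a pending signal only
 * counts while the sleep state is interruptible */
static bool signal_pending_state_model(long state, bool sig_pending)
{
	if (!(state & TASK_INTERRUPTIBLE))
		return false;	/* TASK_RUNNING or UNINTERRUPTIBLE: ignored */
	return sig_pending;
}

With state == TASK_RUNNING the model returns false no matter what is pending, which is the hole the unconditional post-schedule() signal_pending() check closes.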