Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f7951c33 authored by Linus Torvalds
Browse files

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Thomas Gleixner:

 - Cleanup and improvement of NUMA balancing

 - Refactoring and improvements to the PELT (Per Entity Load Tracking)
   code

 - Watchdog simplification and related cleanups

 - The usual pile of small incremental fixes and improvements

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (41 commits)
  watchdog: Reduce message verbosity
  stop_machine: Reflow cpu_stop_queue_two_works()
  sched/numa: Move task_numa_placement() closer to numa_migrate_preferred()
  sched/numa: Use group_weights to identify if migration degrades locality
  sched/numa: Update the scan period without holding the numa_group lock
  sched/numa: Remove numa_has_capacity()
  sched/numa: Modify migrate_swap() to accept additional parameters
  sched/numa: Remove unused task_capacity from 'struct numa_stats'
  sched/numa: Skip nodes that are at 'hoplimit'
  sched/debug: Reverse the order of printing faults
  sched/numa: Use task faults only if numa_group is not yet set up
  sched/numa: Set preferred_node based on best_cpu
  sched/numa: Simplify load_too_imbalanced()
  sched/numa: Evaluate move once per node
  sched/numa: Remove redundant field
  sched/debug: Show the sum wait time of a task group
  sched/fair: Remove #ifdefs from scale_rt_capacity()
  sched/core: Remove get_cpu() from sched_fork()
  sched/cpufreq: Clarify sugov_get_util()
  sched/sysctl: Remove unused sched_time_avg_ms sysctl
  ...
parents 2406fb8d 1b6266eb
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
	dvcpu->arch.wait = 0;

	if (swq_has_sleeper(&dvcpu->wq))
-		swake_up(&dvcpu->wq);
+		swake_up_one(&dvcpu->wq);

	return 0;
}
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)

	vcpu->arch.wait = 0;
	if (swq_has_sleeper(&vcpu->wq))
-		swake_up(&vcpu->wq);
+		swake_up_one(&vcpu->wq);
}

/* low level hrtimer wake routine */
+3 −3
Original line number Diff line number Diff line
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
		++vcpu->stat.halt_wakeup;
	}

@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
		}
	}

-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);

	if (kvmppc_vcore_check_block(vc)) {
		finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
			kvmppc_start_thread(vcpu, vc);
			trace_kvm_guest_enter(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
-			swake_up(&vc->wq);
+			swake_up_one(&vc->wq);
		}

	}
+1 −1
Original line number Diff line number Diff line
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
		 * yield-candidate.
		 */
		vcpu->preempted = true;
-		swake_up(&vcpu->wq);
+		swake_up_one(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
	/*
+2 −2
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)

	for (;;) {
		if (!n.halted)
-			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

@@ -188,7 +188,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
-		swake_up(&n->wq);
+		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
+1 −1
Original line number Diff line number Diff line
@@ -1379,7 +1379,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
	 * using swait_active() is safe.
	 */
	if (swait_active(q))
-		swake_up(q);
+		swake_up_one(q);

	if (apic_lvtt_tscdeadline(apic))
		ktimer->expired_tscdeadline = ktimer->tscdeadline;
Loading