Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 19ccb76a authored by Paul Mackerras, committed by Avi Kivity
Browse files

KVM: PPC: Implement H_CEDE hcall for book3s_hv in real-mode code



With a KVM guest operating in SMT4 mode (i.e. 4 hardware threads per
core), whenever a CPU goes idle, we have to pull all the other
hardware threads in the core out of the guest, because the H_CEDE
hcall is handled in the kernel.  This is inefficient.

This adds code to book3s_hv_rmhandlers.S to handle the H_CEDE hcall
in real mode.  When a guest vcpu does an H_CEDE hcall, we now only
exit to the kernel if all the other vcpus in the same core are also
idle.  Otherwise we mark this vcpu as napping, save state that could
be lost in nap mode (mainly GPRs and FPRs), and execute the nap
instruction.  When the thread wakes up, because of a decrementer or
external interrupt, we come back in at kvm_start_guest (from the
system reset interrupt vector), find the `napping' flag set in the
paca, and go to the resume path.

This has some other ramifications.  First, when starting a core, we
now start all the threads, both those that are immediately runnable and
those that are idle.  This is so that we don't have to pull all the
threads out of the guest when an idle thread gets a decrementer interrupt
and wants to start running.  In fact the idle threads will all start
with the H_CEDE hcall returning; being idle they will just do another
H_CEDE immediately and go to nap mode.

This required some changes to kvmppc_run_core() and kvmppc_run_vcpu().
These functions have been restructured to make them simpler and clearer.
We introduce a level of indirection in the wait queue that gets woken
when external and decrementer interrupts get generated for a vcpu, so
that we can have the 4 vcpus in a vcore using the same wait queue.
We need this because the 4 vcpus are being handled by one thread.

Secondly, when we need to exit from the guest to the kernel, we now
have to generate an IPI for any napping threads, because an HDEC
interrupt doesn't wake up a napping thread.

Thirdly, we now need to be able to handle virtual external interrupts
and decrementer interrupts becoming pending while a thread is napping,
and deliver those interrupts to the guest when the thread wakes.
This is done in kvmppc_cede_reentry, just before fast_guest_return.

Finally, since we are not using the generic kvm_vcpu_block for book3s_hv,
and hence not calling kvm_arch_vcpu_runnable, we can remove the #ifdef
from kvm_arch_vcpu_runnable.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 02143947
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -76,6 +76,7 @@ struct kvmppc_host_state {
	ulong scratch1;
	u8 in_guest;
	u8 restore_hid5;
	u8 napping;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	struct kvm_vcpu *kvm_vcpu;
+15 −4
Original line number Diff line number Diff line
@@ -198,21 +198,29 @@ struct kvm_arch {
 */
struct kvmppc_vcore {
	int n_runnable;
	int n_blocked;
	int n_busy;
	int num_threads;
	int entry_exit_count;
	int n_woken;
	int nap_count;
	int napping_threads;
	u16 pcpu;
	u8 vcore_running;
	u8 vcore_state;
	u8 in_guest;
	struct list_head runnable_threads;
	spinlock_t lock;
	wait_queue_head_t wq;
};

#define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
#define VCORE_EXIT_COUNT(vc)	((vc)->entry_exit_count >> 8)

/* Values for vcore_state */
#define VCORE_INACTIVE	0
#define VCORE_RUNNING	1
#define VCORE_EXITING	2
#define VCORE_SLEEPING	3

struct kvmppc_pte {
	ulong eaddr;
	u64 vpage;
@@ -403,11 +411,13 @@ struct kvm_vcpu_arch {
	struct dtl *dtl;
	struct dtl *dtl_end;

	wait_queue_head_t *wqp;
	struct kvmppc_vcore *vcore;
	int ret;
	int trap;
	int state;
	int ptid;
	bool timer_running;
	wait_queue_head_t cpu_run;

	struct kvm_vcpu_arch_shared *shared;
@@ -423,8 +433,9 @@ struct kvm_vcpu_arch {
#endif
};

#define KVMPPC_VCPU_BUSY_IN_HOST	0
#define KVMPPC_VCPU_BLOCKED		1
/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_STOPPED		0
#define KVMPPC_VCPU_BUSY_IN_HOST	1
#define KVMPPC_VCPU_RUNNABLE		2

#endif /* __POWERPC_KVM_HOST_H__ */
+6 −0
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@
#include <asm/compat.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/alpaca.h>
@@ -460,6 +461,8 @@ int main(void)
	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
	DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
	DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
@@ -475,6 +478,7 @@ int main(void)
	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
@@ -532,6 +536,7 @@ int main(void)
	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
	HSTATE_FIELD(HSTATE_NAPPING, napping);

#ifdef CONFIG_KVM_BOOK3S_64_HV
	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
@@ -544,6 +549,7 @@ int main(void)
	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
	HSTATE_FIELD(HSTATE_DABR, dabr);
	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#else /* CONFIG_PPC_BOOK3S */
+188 −147
Original line number Diff line number Diff line
@@ -62,6 +62,8 @@
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	local_paca->kvm_hstate.kvm_vcpu = vcpu;
@@ -72,40 +74,10 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu);
static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu);

void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
{
	u64 now;
	unsigned long dec_nsec;

	now = get_tb();
	if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
		kvmppc_core_queue_dec(vcpu);
	if (vcpu->arch.pending_exceptions)
		return;
	if (vcpu->arch.dec_expires != ~(u64)0) {
		dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
			tb_ticks_per_sec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
	}

	kvmppc_vcpu_blocked(vcpu);

	kvm_vcpu_block(vcpu);
	vcpu->stat.halt_wakeup++;

	if (vcpu->arch.dec_expires != ~(u64)0)
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

	kvmppc_vcpu_unblocked(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
@@ -257,15 +229,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)

	switch (req) {
	case H_CEDE:
		vcpu->arch.shregs.msr |= MSR_EE;
		vcpu->arch.ceded = 1;
		smp_mb();
		if (!vcpu->arch.prodded)
			kvmppc_vcpu_block(vcpu);
		else
			vcpu->arch.prodded = 0;
		smp_mb();
		vcpu->arch.ceded = 0;
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
@@ -388,20 +351,6 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		break;
	}


	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(tsk)) {
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	return r;
}

@@ -479,13 +428,9 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * Some vcpus may start out in stopped state.  If we initialize
	 * them to busy-in-host state they will stop other vcpus in the
	 * vcore from running.  Instead we initialize them to blocked
	 * state, effectively considering them to be stopped until we
	 * see the first run ioctl for them.
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

@@ -496,6 +441,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
		}
		kvm->arch.vcores[core] = vcore;
	}
@@ -506,7 +452,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	++vcore->n_blocked;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

@@ -527,30 +472,31 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
	kfree(vcpu);
}

static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu)
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long dec_nsec, now;

	spin_lock(&vc->lock);
	vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
	++vc->n_blocked;
	if (vc->n_runnable > 0 &&
	    vc->n_runnable + vc->n_blocked == vc->num_threads) {
		vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
					arch.run_list);
		wake_up(&vcpu->arch.cpu_run);
	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_deliver_interrupts(vcpu);
		return;
	}
	spin_unlock(&vc->lock);
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu)
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vc->lock);
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_blocked;
	spin_unlock(&vc->lock);
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -565,6 +511,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
@@ -578,15 +525,20 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#ifdef CONFIG_PPC_ICP_NATIVE
	if (vcpu->arch.ptid) {
		tpaca->cpu_start = 0x80;
		tpaca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST;
		wmb();
		xics_wake_cpu(cpu);
		++vc->n_woken;
@@ -634,9 +586,10 @@ static int on_primary_thread(void)
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vnext;
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
@@ -655,29 +608,50 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_running = 1;
	vc->vcore_state = VCORE_RUNNING;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		kvmppc_start_thread(vcpu);
	vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
				arch.run_list);

	preempt_disable();
	spin_unlock(&vc->lock);

	preempt_disable();
	kvm_guest_enter();
	__kvmppc_vcore_entry(NULL, vcpu);
	__kvmppc_vcore_entry(NULL, vcpu0);

	/* wait for secondary threads to finish writing their state to memory */
	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_running = 2;
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
@@ -693,22 +667,26 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);
		if (!vcpu->arch.trap) {
			if (signal_pending(vcpu->arch.run_task)) {
				vcpu->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				vcpu->arch.ret = -EINTR;
			}
			continue;		/* didn't get to run */
		}

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_running = 0;
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
@@ -720,82 +698,130 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
	return 1;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	int ptid;
	int wait_state;
	struct kvmppc_vcore *vc;
	DEFINE_WAIT(wait);

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

	/* On PPC970, check that we have an RMA region */
	if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
		return -EPERM;
/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	/* This happens the first time this is called for a vcpu */
	if (vcpu->arch.state == KVMPPC_VCPU_BLOCKED)
		--vc->n_blocked;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	ptid = vc->n_runnable;
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.ptid = ptid;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	wait_state = TASK_INTERRUPTIBLE;
	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		if (signal_pending(current)) {
			if (!vc->vcore_running) {
				kvm_run->exit_reason = KVM_EXIT_INTR;
				vcpu->arch.ret = -EINTR;
				break;
			}
			/* have to wait for vcore to stop executing guest */
			wait_state = TASK_UNINTERRUPTIBLE;
			smp_send_reschedule(vc->pcpu);
	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}

		if (!vc->vcore_running &&
		    vc->n_runnable + vc->n_blocked == vc->num_threads) {
			/* we can run now */
			if (kvmppc_run_core(vc))
	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		if (vc->vcore_running == 1 && VCORE_EXIT_COUNT(vc) == 0)
			kvmppc_start_thread(vcpu);
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_deliver_interrupts(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
	}

		/* wait for other threads to come in, or wait for vcore */
		prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
		schedule();
		finish_wait(&vcpu->arch.cpu_run, &wait);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
	spin_unlock(&vc->lock);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

@@ -808,6 +834,21 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
		return -EINVAL;
	}

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On PPC970, check that we have an RMA region */
	if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
		return -EPERM;

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

+263 −34

File changed.

Preview size limit exceeded, changes collapsed.

Loading