
Commit ccae663c authored by Linus Torvalds
Pull KVM bugfixes from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: use dynamic percpu allocations for shared msrs area
  KVM: PPC: Book3S HV: Fix compilation without CONFIG_PPC_POWERNV
  powerpc: Corrected include header path in kvm_para.h
  Add rcu user eqs exception hooks for async page fault
parents 4ffd4ebf 013f6a5d
arch/powerpc/include/uapi/asm/kvm_para.h +1 −1
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_HCALL_TOKEN(num)     _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
 
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
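The direction of this one-line change is easy to misread: exported (uapi) headers are installed for userspace with the uapi/ prefix stripped, so a header living under arch/powerpc/include/uapi/asm/ must refer to its neighbours as <asm/...>; the in-kernel include search path resolves that to the uapi copy first, and the installed header tree has no uapi/ directory at all. A minimal sketch of the convention, using a hypothetical exported header:

/* arch/powerpc/include/uapi/asm/example.h -- hypothetical uapi header */
#ifndef _UAPI_ASM_POWERPC_EXAMPLE_H
#define _UAPI_ASM_POWERPC_EXAMPLE_H

#include <asm/types.h>		/* correct: resolves in-tree and after headers_install */
/* #include <uapi/asm/types.h> -- wrong: no such path once headers are installed */

#define EXAMPLE_MAGIC	0x45584d50	/* hypothetical constant */

#endif /* _UAPI_ASM_POWERPC_EXAMPLE_H */
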
arch/powerpc/kvm/book3s_hv_ras.c +4 −0
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
 	unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
 	struct opal_machine_check_event *opal_evt;
+#endif
 	long handled = 1;
 
 	if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 		handled = 0;
 	}
 
+#ifdef CONFIG_PPC_POWERNV
 	/*
 	 * See if OPAL has already handled the condition.
 	 * We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 
 	if (handled)
 		opal_evt->in_use = 0;
+#endif
 
 	return handled;
 }
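
struct opal_machine_check_event and the OPAL firmware calls exist only when CONFIG_PPC_POWERNV is set, so the declaration and every use of opal_evt have to be compiled out together; guarding only the calls would leave an unused local, and an undefined type, behind on non-POWERNV builds. The same pattern in a self-contained, userspace-compilable form, with all names hypothetical; build with and without -DCONFIG_FOO_PLATFORM to see both configurations compile:

#include <stdio.h>

#ifdef CONFIG_FOO_PLATFORM
/* Platform-only type and hook; on real hardware these would come from firmware. */
struct foo_event { int recovered; };
static struct foo_event *foo_get_event(void)
{
	static struct foo_event e = { .recovered = 1 };
	return &e;
}
#endif

static long handle_machine_check(void)
{
#ifdef CONFIG_FOO_PLATFORM
	struct foo_event *evt;	/* declared only when the type exists */
#endif
	long handled = 0;

	/* ... platform-independent decoding of the error goes here ... */

#ifdef CONFIG_FOO_PLATFORM
	evt = foo_get_event();
	if (evt && evt->recovered)
		handled = 1;	/* firmware already recovered the condition */
#endif
	return handled;
}

int main(void)
{
	printf("handled = %ld\n", handle_machine_check());
	return 0;
}
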
arch/x86/kernel/kvm.c +10 −2
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
 
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
 	struct kvm_task_sleep_node n, *e;
 	DEFINE_WAIT(wait);
 
+	rcu_irq_enter();
+
 	spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
 	if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
 		hlist_del(&e->link);
 		kfree(e);
 		spin_unlock(&b->lock);
+
+		rcu_irq_exit();
 		return;
 	}
 
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
+			rcu_irq_exit();
 			native_safe_halt();
+			rcu_irq_enter();
 			local_irq_disable();
 		}
 	}
 	if (!n.halted)
 		finish_wait(&n.wq, &wait);
 
+	rcu_irq_exit();
 	return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		/* page is swapped out by the host. */
-		rcu_irq_enter();
+		exception_enter(regs);
 		exit_idle();
 		kvm_async_pf_task_wait((u32)read_cr2());
-		rcu_irq_exit();
+		exception_exit(regs);
 		break;
 	case KVM_PV_REASON_PAGE_READY:
 		rcu_irq_enter();
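
The bracketing above is the whole point of this patch: an async page fault exception can arrive while the CPU is in an RCU extended quiescent state (idle, or userspace with CONFIG_RCU_USER_QS), so the handler must announce itself to RCU before relying on it, and must drop back out of RCU's view around the halt so a halted CPU still looks quiescent. A sketch of the pairing rules; this is kernel-context code, not standalone-buildable, and host_says_ready() is a hypothetical stand-in for the wakeup condition:

#include <linux/types.h>
#include <linux/rcupdate.h>	/* rcu_irq_enter()/rcu_irq_exit(), 3.8-era names */
#include <linux/irqflags.h>	/* local_irq_disable() */
#include <asm/irqflags.h>	/* native_safe_halt(): sti; hlt */

extern bool host_says_ready(void);	/* hypothetical completion check */

static void wait_for_host(void)
{
	rcu_irq_enter();		/* RCU read-side sections are legal from here on */

	while (!host_says_ready()) {
		rcu_irq_exit();		/* let RCU treat this CPU as idle... */
		native_safe_halt();	/* ...while it sits halted with irqs enabled */
		rcu_irq_enter();	/* an interrupt woke us: re-announce to RCU */
		local_irq_disable();
	}

	rcu_irq_exit();			/* every path balances the first enter */
}
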
arch/x86/kvm/x86.c +18 −6
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 
 static void shared_msr_update(unsigned slot, u32 msr)
 {
-	struct kvm_shared_msrs *smsr;
 	u64 value;
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
-	smsr = &__get_cpu_var(shared_msrs);
 	/* only read, and nobody should modify it at this time,
 	 * so don't need lock */
 	if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
 		return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
 static void drop_user_return_notifiers(void *ignore)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (smsr->registered)
 		kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
+	r = -ENOMEM;
+	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+	if (!shared_msrs) {
+		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+		goto out;
+	}
+
 	r = kvm_mmu_module_init();
 	if (r)
-		goto out;
+		goto out_free_percpu;
 
 	kvm_set_mmio_spte_mask();
 	kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
 
 	return 0;
 
+out_free_percpu:
+	free_percpu(shared_msrs);
 out:
 	return r;
 }
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
 #endif
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
+	free_percpu(shared_msrs);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
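
This last change follows the standard lifecycle for dynamic percpu data: a large static DEFINE_PER_CPU() area in a module is carved out of a small reserved percpu region, so the structure is instead allocated with alloc_percpu() at init time, reached through per_cpu_ptr() with the current CPU id, and freed on both the init error path and at module exit. A minimal module-style sketch of that lifecycle, with all names hypothetical:

#include <linux/module.h>
#include <linux/percpu.h>	/* alloc_percpu(), per_cpu_ptr(), free_percpu() */
#include <linux/smp.h>		/* get_cpu()/put_cpu() */
#include <linux/types.h>

struct hit_counter { u64 hits; };	/* hypothetical per-CPU payload */

static struct hit_counter __percpu *counters;

static void touch_counter(void)
{
	int cpu = get_cpu();	/* disable preemption so the CPU id stays valid */
	per_cpu_ptr(counters, cpu)->hits++;
	put_cpu();
}

static int __init counter_init(void)
{
	counters = alloc_percpu(struct hit_counter);	/* zeroed, GFP_KERNEL */
	if (!counters)
		return -ENOMEM;
	touch_counter();
	return 0;
}

static void __exit counter_exit(void)
{
	free_percpu(counters);	/* every alloc_percpu() needs a matching free */
}

module_init(counter_init);
module_exit(counter_exit);
MODULE_LICENSE("GPL");
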