Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d8efcf38 authored by Linus Torvalds
Browse files
Pull KVM fixes from Paolo Bonzini:
 "Three x86 fixes and one for ARM/ARM64.

  In particular, nested virtualization on Intel is broken in 3.13 and
  fixed by this pull request"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm, vmx: Really fix lazy FPU on nested guest
  kvm: x86: fix emulator buffer overflow (CVE-2014-0049)
  arm/arm64: KVM: detect CPU reset on CPU_PM_EXIT
  KVM: MMU: drop read-only large sptes when creating lower level sptes
parents 78d9e934 1b385cbd
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	if (cmd == CPU_PM_EXIT) {
	if (cmd == CPU_PM_EXIT &&
	    __hyp_get_vectors() == hyp_default_vectors) {
		cpu_init_hyp_mode(NULL);
		return NOTIFY_OK;
	}
+10 −1
Original line number Diff line number Diff line
@@ -220,6 +220,10 @@ after_vfp_restore:
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
@@ -363,6 +367,11 @@ hyp_hvc:
host_switch_to_hyp:
	pop	{r0, r1, r2}

	/* Check for __hyp_get_vectors */
	cmp	r0, #-1
	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
	beq	1f

	push	{lr}
	mrs	lr, SPSR
	push	{lr}
@@ -378,7 +387,7 @@ THUMB( orr lr, #1)
	pop	{lr}
	msr	SPSR_csxf, lr
	pop	{lr}
	eret
1:	eret

guest_trap:
	load_vcpu			@ Load VCPU pointer to r0
+25 −2
Original line number Diff line number Diff line
@@ -694,6 +694,24 @@ __hyp_panic_str:

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
@@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2
	pop	x2, x3
	pop	x0, x1

	push	lr, xzr
	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
@@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2
	blr	lr

	pop	lr, xzr
	eret
2:	eret

el1_trap:
	/*
+1 −0
Original line number Diff line number Diff line
@@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			break;
		}

		drop_large_spte(vcpu, iterator.sptep);
		if (!is_shadow_present_pte(*iterator.sptep)) {
			u64 base_addr = iterator.addr;

+1 −1
Original line number Diff line number Diff line
@@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
		else if (is_page_fault(intr_info))
			return enable_ept;
		else if (is_no_device(intr_info) &&
			 !(nested_read_cr0(vmcs12) & X86_CR0_TS))
			 !(vmcs12->guest_cr0 & X86_CR0_TS))
			return 0;
		return vmcs12->exception_bitmap &
				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
Loading