
Commit 5b6c02f3 authored by Linus Torvalds
Pull KVM fixes from Radim Krčmář:
 "s390:
   - Two fixes for potential bitmap overruns in the cmma migration code

  x86:
   - Clear guest provided GPRs to defeat the Project Zero PoC for CVE
     2017-5715"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: vmx: Scrub hardware GPRs at VM-exit
  KVM: s390: prevent buffer overrun on memory hotplug during migration
  KVM: s390: fix cmma migration for multiple memory slots
parents 3219e264 bb4945e6
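
For context, the x86 change below implements a simple hardening idea for CVE-2017-5715 (Spectre variant 2): every general-purpose register that may still hold a guest-chosen value at VM-exit is zeroed before further host code runs, so a mispredicted indirect branch in the host cannot consume attacker-controlled data. A minimal standalone sketch of the pattern (illustrative only; the hypothetical scrub_guest_gprs() is not the kernel's code, which lives in the svm.c and vmx.c hunks below):

	/* Illustrative only: zero registers that held guest state so
	 * speculatively executed host code cannot use guest-chosen
	 * values. Builds with GCC/Clang on x86-64. */
	static inline void scrub_guest_gprs(void)
	{
	#ifdef __x86_64__
		asm volatile(
			"xor %%r8d,  %%r8d  \n\t" /* 32-bit xor zero-extends */
			"xor %%r9d,  %%r9d  \n\t" /* into the full register  */
			"xor %%r10d, %%r10d \n\t"
			"xor %%r11d, %%r11d \n\t"
			::: "r8", "r9", "r10", "r11");
	#endif
	}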
arch/s390/kvm/kvm-s390.c +5 −4
@@ -792,11 +792,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)

 	if (kvm->arch.use_cmma) {
 		/*
-		 * Get the last slot. They should be sorted by base_gfn, so the
-		 * last slot is also the one at the end of the address space.
-		 * We have verified above that at least one slot is present.
+		 * Get the first slot. They are reverse sorted by base_gfn, so
+		 * the first slot is also the one at the end of the address
+		 * space. We have verified above that at least one slot is
+		 * present.
 		 */
-		ms = slots->memslots + slots->used_slots - 1;
+		ms = slots->memslots;
 		/* round up so we only use full longs */
 		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
 		/* allocate enough bytes to store all the bits */
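
The fix above hinges on memslot ordering: as the new comment says, the memslots[] array is reverse sorted by base_gfn, so memslots[0], not memslots[used_slots - 1], covers the end of guest address space and must determine the CMMA bitmap size. A simplified sketch of the corrected sizing logic (hypothetical struct and helper names, types reduced for illustration):

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	struct slot { unsigned long base_gfn, npages; };

	/* slots[] is sorted by descending base_gfn, so slots[0]
	 * reaches highest into guest memory. */
	static unsigned long cmma_bitmap_bits(const struct slot *slots)
	{
		unsigned long last = slots[0].base_gfn + slots[0].npages;

		/* round up so the bitmap occupies only full longs */
		return (last + BITS_PER_LONG - 1) / BITS_PER_LONG * BITS_PER_LONG;
	}

With the old indexing, the slot with the lowest base_gfn sized the bitmap, so guest memory in any higher slot was tracked in bits that were never allocated.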
arch/s390/kvm/priv.c +1 −1
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
 		cbrlo[entries] = gfn << PAGE_SHIFT;
 	}
 
-	if (orc) {
+	if (orc && gfn < ms->bitmap_size) {
 		/* increment only if we are really flipping the bit to 1 */
 		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
 			atomic64_inc(&ms->dirty_pages);
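
This guard addresses memory hotplug during migration: pgste_bitmap is allocated when migration starts, so a frame hotplugged afterwards can have a gfn beyond that allocation, and an unchecked test_and_set_bit() would write past the buffer. bitmap_size counts bits (one per guest frame), so comparing gfn against it is exactly the right bound; frames above it are simply left untracked. The general pattern, as a self-contained sketch (set_bit_bounded() is a hypothetical name):

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	/* Set a bit only if it lies inside the allocated bitmap;
	 * out-of-range bits are ignored rather than corrupting
	 * whatever follows the allocation. */
	static int set_bit_bounded(unsigned long bit, unsigned long *map,
				   unsigned long size_bits)
	{
		if (bit >= size_bits)
			return 0;	/* beyond the allocation: skip */
		map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
		return 1;
	}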
arch/x86/kvm/svm.c +19 −0
@@ -4985,6 +4985,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		/*
		* Clear host registers marked as clobbered to prevent
		* speculative use.
		*/
		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
#ifdef CONFIG_X86_64
		"xor %%r8, %%r8 \n\t"
		"xor %%r9, %%r9 \n\t"
		"xor %%r10, %%r10 \n\t"
		"xor %%r11, %%r11 \n\t"
		"xor %%r12, %%r12 \n\t"
		"xor %%r13, %%r13 \n\t"
		"xor %%r14, %%r14 \n\t"
		"xor %%r15, %%r15 \n\t"
#endif
		"pop %%" _ASM_BP
		:
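
One detail worth noting: this svm.c hunk clears the extended registers with 64-bit forms (xor %r8, %r8), while the vmx.c hunk below uses the 32-bit forms (xor %r8d, %r8d). Both fully clear the register, because on x86-64 a write to a 32-bit subregister zero-extends into the upper half; for the legacy registers (eax versus rax) the 32-bit form is also a byte shorter. A two-line sketch (x86-64 only; function name hypothetical):

	void zero_r8_both_ways(void)
	{
		asm volatile("xor %%r8, %%r8"   ::: "r8"); /* 64-bit form */
		asm volatile("xor %%r8d, %%r8d" ::: "r8"); /* 32-bit form, same result */
	}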
arch/x86/kvm/vmx.c +13 −1
@@ -9415,6 +9415,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
+		"setbe %c[fail](%0)\n\t"
 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9431,12 +9432,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
		"mov %%r13, %c[r13](%0) \n\t"
		"mov %%r14, %c[r14](%0) \n\t"
		"mov %%r15, %c[r15](%0) \n\t"
		"xor %%r8d,  %%r8d \n\t"
		"xor %%r9d,  %%r9d \n\t"
		"xor %%r10d, %%r10d \n\t"
		"xor %%r11d, %%r11d \n\t"
		"xor %%r12d, %%r12d \n\t"
		"xor %%r13d, %%r13d \n\t"
		"xor %%r14d, %%r14d \n\t"
		"xor %%r15d, %%r15d \n\t"
#endif
		"mov %%cr2, %%" _ASM_AX "   \n\t"
		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"

		"xor %%eax, %%eax \n\t"
		"xor %%ebx, %%ebx \n\t"
		"xor %%esi, %%esi \n\t"
		"xor %%edi, %%edi \n\t"
		"pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
		"setbe %c[fail](%0) \n\t"
		".pushsection .rodata \n\t"
		".global vmx_return \n\t"
		"vmx_return: " _ASM_PTR " 2b \n\t"