Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e5841525 authored by Linus Torvalds
Browse files
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: Fix memory leak on guest demand faults
  KVM: VMX: convert init_rmode_tss() to slots_lock
  KVM: MMU: handle page removal with shadow mapping
  KVM: MMU: Fix is_rmap_pte() with io ptes
  KVM: VMX: Restore tss even on x86_64
parents 7ed7fe5e e48bb497
Loading
Loading
Loading
Loading
+14 −4
Original line number | Diff line number | Diff line
@@ -222,8 +222,7 @@ static int is_io_pte(unsigned long pte)

/*
 * Return nonzero if @pte is a shadow pte that carries an rmap entry.
 *
 * Delegates to is_shadow_present_pte() rather than comparing against the
 * shadow_trap_nonpresent_pte / shadow_notrap_nonpresent_pte sentinels
 * directly; per the commit log ("KVM: MMU: Fix is_rmap_pte() with io ptes")
 * the open-coded comparison misclassified io ptes.
 * NOTE(review): is_shadow_present_pte() is defined elsewhere in this file
 * and is assumed to exclude both nonpresent sentinel encodings — confirm.
 */
static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static gfn_t pse36_gfn_delta(u32 gpte)
@@ -893,14 +892,25 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 int *ptwrite, gfn_t gfn, struct page *page)
{
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);
	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		if (host_pfn != page_to_pfn(page)) {
			pgprintk("hfn old %lx new %lx\n",
				 host_pfn, page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		}
		else
			was_rmapped = 1;
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
@@ -1402,7 +1412,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
	up_read(&current->mm->mmap_sem);

	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
	vcpu->arch.update_pte.page = page;
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+2 −5
Original line number | Diff line number | Diff line
@@ -349,8 +349,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)

static void reload_tss(void)
{
#ifndef CONFIG_X86_64

	/*
	 * VT restores TR but not its size.  Useless.
	 */
@@ -361,7 +359,6 @@ static void reload_tss(void)
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
#endif
}

static void load_transition_efer(struct vcpu_vmx *vmx)
@@ -1436,7 +1433,7 @@ static int init_rmode_tss(struct kvm *kvm)
	int ret = 0;
	int r;

	down_read(&current->mm->mmap_sem);
	down_read(&kvm->slots_lock);
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
@@ -1459,7 +1456,7 @@ static int init_rmode_tss(struct kvm *kvm)

	ret = 1;
out:
	up_read(&current->mm->mmap_sem);
	up_read(&kvm->slots_lock);
	return ret;
}