
Commit a0a4a2cb authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: VMX: Always return old for clear_flush_young() when using EPT
  KVM: SVM: fix guest global tlb flushes with NPT
  KVM: SVM: fix random segfaults with NPT enabled
parents 29cd195e 534e38b4
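
Note: the first two fixes above share one idea: first-generation EPT provides no hardware accessed/dirty bits, so the MMU must not pretend to track them, and the page-aging path should simply report "old". The following minimal, self-contained C sketch illustrates that pattern only; the names accessed_mask, spte_list and page_is_young are hypothetical, not KVM symbols, and the actual change is the kvm_age_rmapp hunk below.

/*
 * Sketch: when the hardware provides no accessed bit (mask is zero),
 * report every page as "old" instead of scanning SPTEs that can never
 * carry the bit.
 */
#include <stdint.h>
#include <stddef.h>

static uint64_t accessed_mask;          /* 0 when there is no hardware A bit (EPT) */

static int page_is_young(const uint64_t *spte_list, size_t n)
{
        size_t i;

        if (!accessed_mask)             /* no A bit -> always "old" */
                return 0;

        for (i = 0; i < n; i++)
                if (spte_list[i] & accessed_mask)
                        return 1;       /* at least one mapping was accessed */
        return 0;
}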
+4 −0
@@ -711,6 +711,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
 	u64 *spte;
 	int young = 0;
 
+	/* always return old for EPT */
+	if (!shadow_accessed_mask)
+		return 0;
+
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		int _young;
+12 −0
@@ -62,6 +62,7 @@ static int npt = 1;
 module_param(npt, int, S_IRUGO);
 
 static void kvm_reput_irq(struct vcpu_svm *svm);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
@@ -878,6 +879,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
+
+	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
+		force_new_asid(vcpu);
 
 	vcpu->arch.cr4 = cr4;
 	if (!npt_enabled)
@@ -1027,6 +1032,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
 			    (u32)fault_address, (u32)(fault_address >> 32),
 			    handler);
+	/*
+	 * FIXME: This shouldn't be necessary here, but there is a flush
+	 * missing in the MMU code. Until we find this bug, flush the
+	 * complete TLB here on an NPF
+	 */
+	if (npt_enabled)
+		svm_flush_tlb(&svm->vcpu);
 
 	if (event_injection)
 		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+1 −2
@@ -3301,8 +3301,7 @@ static int __init vmx_init(void)
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK |
 			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
-		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
-				VMX_EPT_FAKE_DIRTY_MASK, 0ull,
+		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 				VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
 	} else
+0 −2
@@ -370,8 +370,6 @@ enum vmcs_field {
 #define VMX_EPT_READABLE_MASK			0x1ull
 #define VMX_EPT_WRITABLE_MASK			0x2ull
 #define VMX_EPT_EXECUTABLE_MASK			0x4ull
-#define VMX_EPT_FAKE_ACCESSED_MASK		(1ull << 62)
-#define VMX_EPT_FAKE_DIRTY_MASK			(1ull << 63)
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR		0xfffbc000ul