
Commit 401d10de authored by Amit Shah, committed by Avi Kivity

KVM: VMX: Update necessary state when guest enters long mode



setup_msrs() should be called when entering long mode to save the
shadow MSR state for the 64-bit guest.
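
The reason setup_msrs() matters here: which guest MSRs are worth saving
and restoring depends on whether EFER.LMA is set. The following is a
minimal standalone sketch of only that dependency; the demo_* names,
types, and MSR list are simplified stand-ins, not the actual vmx.c code.

#include <stdint.h>
#include <stdio.h>

#define EFER_LMA (1ULL << 10)   /* long mode active */

/* Hypothetical, simplified stand-in for the per-vCPU MSR bookkeeping. */
struct demo_vcpu {
	uint64_t shadow_efer;
	const char *save_msrs[8];
	int nr_save_msrs;
};

/* Re-derive the set of MSRs to save/restore.  This has to be re-run
 * whenever shadow_efer (in particular EFER.LMA) changes, which is why
 * setup_msrs() must run on the long-mode transition. */
static void demo_setup_msrs(struct demo_vcpu *vcpu)
{
	vcpu->nr_save_msrs = 0;
	if (vcpu->shadow_efer & EFER_LMA) {
		/* 64-bit guest: the SYSCALL-related MSRs become relevant. */
		vcpu->save_msrs[vcpu->nr_save_msrs++] = "MSR_LSTAR";
		vcpu->save_msrs[vcpu->nr_save_msrs++] = "MSR_CSTAR";
		vcpu->save_msrs[vcpu->nr_save_msrs++] = "MSR_SYSCALL_MASK";
		vcpu->save_msrs[vcpu->nr_save_msrs++] = "MSR_KERNEL_GS_BASE";
	}
	vcpu->save_msrs[vcpu->nr_save_msrs++] = "MSR_EFER";
}

int main(void)
{
	struct demo_vcpu vcpu = { .shadow_efer = 0 };

	demo_setup_msrs(&vcpu);            /* 32-bit guest: short list   */
	vcpu.shadow_efer |= EFER_LMA;      /* guest enters long mode     */
	demo_setup_msrs(&vcpu);            /* list must be recomputed    */

	for (int i = 0; i < vcpu.nr_save_msrs; i++)
		printf("%s\n", vcpu.save_msrs[i]);
	return 0;
}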

Using vmx_set_efer() in enter_lmode() removes some duplicated code
and also ensures we call setup_msrs(). We can safely pass the value
of shadow_efer to vmx_set_efer(), as no other EFER bits change while
long mode is being enabled (the guest first sets EFER.LME, then sets
CR0.PG, which causes a vmexit in which we activate long mode).
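
That ordering is the key invariant: EFER.LME is already set by the time
the CR0.PG write traps, so only LMA changes at activation time. Below is
a standalone sketch of the sequence, with demo_* stand-ins rather than
the real vmx_set_cr0()/enter_lmode() code.

#include <stdint.h>
#include <stdio.h>

#define EFER_LME   (1ULL << 8)    /* long mode enable (set by the guest)  */
#define EFER_LMA   (1ULL << 10)   /* long mode active (set on activation) */
#define X86_CR0_PG (1UL << 31)    /* paging enable                        */

/* Hypothetical stand-in for the vCPU state touched on this path. */
struct demo_vcpu {
	uint64_t shadow_efer;
	unsigned long cr0;
};

/* Roughly what enter_lmode() does after this patch: only EFER.LMA is
 * added; every other EFER bit is untouched, which is why passing
 * shadow_efer back into vmx_set_efer() is safe. */
static void demo_enter_lmode(struct demo_vcpu *vcpu)
{
	vcpu->shadow_efer |= EFER_LMA;
	/* vmx_set_efer(vcpu, vcpu->arch.shadow_efer) would run here,
	 * updating VM_ENTRY_CONTROLS and re-running setup_msrs(). */
}

/* The CR0 write is where the transition is detected: LME already set,
 * paging being turned on => activate long mode. */
static void demo_set_cr0(struct demo_vcpu *vcpu, unsigned long cr0)
{
	int was_paging = vcpu->cr0 & X86_CR0_PG;

	if ((vcpu->shadow_efer & EFER_LME) && !was_paging && (cr0 & X86_CR0_PG))
		demo_enter_lmode(vcpu);
	vcpu->cr0 = cr0;
}

int main(void)
{
	struct demo_vcpu vcpu = { 0 };

	vcpu.shadow_efer |= EFER_LME;        /* guest sets EFER.LME first  */
	demo_set_cr0(&vcpu, X86_CR0_PG);     /* then CR0.PG -> vmexit path */
	printf("LMA set: %d\n", !!(vcpu.shadow_efer & EFER_LMA));
	return 0;
}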

With this fix, is_long_mode() can check for EFER.LMA being set instead
of EFER.LME, and commit 5e23049e86dd298b72e206b420513dbc3a240cd9 can be
reverted.
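
For reference, a sketch of what the is_long_mode() check can look like
once it tests LMA; the exact placement and #ifdef structure here are
assumptions, not taken from this patch.

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.shadow_efer & EFER_LMA;
#else
	return 0;
#endif
}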

Signed-off-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 6b08035f
+24 −30
@@ -1430,6 +1430,29 @@ continue_rmode:
 	init_rmode(vcpu->kvm);
 }
 
+static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+
+	vcpu->arch.shadow_efer = efer;
+	if (!msr)
+		return;
+	if (efer & EFER_LMA) {
+		vmcs_write32(VM_ENTRY_CONTROLS,
+			     vmcs_read32(VM_ENTRY_CONTROLS) |
+			     VM_ENTRY_IA32E_MODE);
+		msr->data = efer;
+	} else {
+		vmcs_write32(VM_ENTRY_CONTROLS,
+			     vmcs_read32(VM_ENTRY_CONTROLS) &
+			     ~VM_ENTRY_IA32E_MODE);
+
+		msr->data = efer & ~EFER_LME;
+	}
+	setup_msrs(vmx);
+}
+
 #ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
@@ -1444,13 +1467,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 			     (guest_tr_ar & ~AR_TYPE_MASK)
 			     | AR_TYPE_BUSY_64_TSS);
 	}
-
 	vcpu->arch.shadow_efer |= EFER_LMA;
-
-	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
-	vmcs_write32(VM_ENTRY_CONTROLS,
-		     vmcs_read32(VM_ENTRY_CONTROLS)
-		     | VM_ENTRY_IA32E_MODE);
+	vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
@@ -1609,30 +1627,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vmcs_writel(GUEST_CR4, hw_cr4);
 }
 
-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
-
-	vcpu->arch.shadow_efer = efer;
-	if (!msr)
-		return;
-	if (efer & EFER_LMA) {
-		vmcs_write32(VM_ENTRY_CONTROLS,
-				     vmcs_read32(VM_ENTRY_CONTROLS) |
-				     VM_ENTRY_IA32E_MODE);
-		msr->data = efer;
-
-	} else {
-		vmcs_write32(VM_ENTRY_CONTROLS,
-				     vmcs_read32(VM_ENTRY_CONTROLS) &
-				     ~VM_ENTRY_IA32E_MODE);
-
-		msr->data = efer & ~EFER_LME;
-	}
-	setup_msrs(vmx);
-}
-
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];