Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ad896af0 authored by Paolo Bonzini
Browse files

KVM: x86: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu



The initialization function in mmu.c can always use walk_mmu, which
is known to be vcpu->arch.mmu.  Only init_kvm_nested_mmu is used to
initialize vcpu->arch.nested_mmu.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e0c6db3e
Loading
Loading
Loading
Loading
+20 −15
Original line number Diff line number Diff line
@@ -3763,7 +3763,7 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.walk_mmu;
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->base_role.word = 0;
	context->page_fault = tdp_page_fault;
@@ -3803,11 +3803,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
	update_last_pte_bitmap(vcpu, context);
}

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
	ASSERT(!VALID_PAGE(context->root_hpa));

	if (!is_paging(vcpu))
		nonpaging_init_context(vcpu, context);
@@ -3818,19 +3820,20 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
	else
		paging32_init_context(vcpu, context);

	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
	vcpu->arch.mmu.base_role.smep_andnot_wp
	context->base_role.nxe = is_nx(vcpu);
	context->base_role.cr4_pae = !!is_pae(vcpu);
	context->base_role.cr0_wp  = is_write_protection(vcpu);
	context->base_role.smep_andnot_wp
		= smep && !is_write_protection(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
		bool execonly)
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
	ASSERT(!VALID_PAGE(context->root_hpa));

	context->shadow_root_level = kvm_x86_ops->get_tdp_level();

@@ -3851,11 +3854,13 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	/* Removed lines: the old code passed the context explicitly and
	 * installed the callbacks through vcpu->arch.walk_mmu. */
	kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
	vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
	vcpu->arch.walk_mmu->get_cr3           = get_cr3;
	vcpu->arch.walk_mmu->get_pdptr         = kvm_pdptr_read;
	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
	/* Added lines: per the commit message, walk_mmu here is known to be
	 * vcpu->arch.mmu, so take &vcpu->arch.mmu directly and call the
	 * now-argumentless kvm_init_shadow_mmu(). */
	struct kvm_mmu *context = &vcpu->arch.mmu;

	kvm_init_shadow_mmu(vcpu);
	context->set_cr3           = kvm_x86_ops->set_cr3;
	context->get_cr3           = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}

static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+2 −3
Original line number Diff line number Diff line
@@ -81,9 +81,8 @@ enum {
};

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
		bool execonly);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
		bool ept);

+2 −2
Original line number Diff line number Diff line
@@ -2003,8 +2003,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);

	WARN_ON(mmu_is_nested(vcpu));
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
+2 −2
Original line number Diff line number Diff line
@@ -8202,9 +8202,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
	WARN_ON(mmu_is_nested(vcpu));
	kvm_init_shadow_ept_mmu(vcpu,
			nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);

	vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
	vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;