
Commit 54bf36aa authored by Paolo Bonzini

KVM: x86: use vcpu-specific functions to read/write/translate GFNs



We need to hide SMRAM from guests not running in SMM.  Therefore,
all uses of kvm_read_guest* and kvm_write_guest* must be changed to
check whether the VCPU is in system management mode and use a
different set of memslots.  Switch from kvm_* to the newly-introduced
kvm_vcpu_*, which call into kvm_arch_vcpu_memslots_id.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e4cd1da9
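
Before the per-file diffs, here is a minimal, self-contained sketch (not the kernel code itself) of the indirection the commit message describes: a vcpu-aware accessor first asks which memslot set this VCPU should see (normal vs. SMM), then resolves the GFN in that set. All type and function names in the sketch (vm, vcpu, vcpu_memslots_id, vcpu_memslots, vcpu_gfn_to_memslot) are hypothetical stand-ins for the real kvm_vcpu_* helpers and kvm_arch_vcpu_memslots_id, and the two-slot layout is invented for illustration only.

/*
 * Minimal sketch (not kernel code) of the per-VCPU memslot indirection.
 * All names are hypothetical stand-ins for the kvm_vcpu_* helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
};

struct memslots {
	struct memslot slot;		/* one slot per set is enough for the sketch */
};

struct vm {
	struct memslots slots[2];	/* [0] = normal view, [1] = SMM view with SMRAM */
};

struct vcpu {
	struct vm *vm;
	bool in_smm;			/* stand-in for the VCPU's SMM state */
};

/* Stand-in for kvm_arch_vcpu_memslots_id(): pick the memslot set for this VCPU. */
static int vcpu_memslots_id(struct vcpu *vcpu)
{
	return vcpu->in_smm ? 1 : 0;
}

/* Stand-in for kvm_vcpu_memslots(): vcpu-aware replacement for kvm_memslots(kvm). */
static struct memslots *vcpu_memslots(struct vcpu *vcpu)
{
	return &vcpu->vm->slots[vcpu_memslots_id(vcpu)];
}

/* Stand-in for kvm_vcpu_gfn_to_memslot(): resolve a GFN in the VCPU's own view. */
static struct memslot *vcpu_gfn_to_memslot(struct vcpu *vcpu, gfn_t gfn)
{
	struct memslot *s = &vcpu_memslots(vcpu)->slot;

	if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
		return s;
	return NULL;			/* GFN not mapped in this view */
}

int main(void)
{
	struct vm vm = {
		/* Normal view stops short of the SMRAM pages; the SMM view covers them. */
		.slots = {
			{ .slot = { .base_gfn = 0, .npages = 0xa0 } },
			{ .slot = { .base_gfn = 0, .npages = 0x100 } },
		},
	};
	struct vcpu vcpu = { .vm = &vm, .in_smm = false };
	gfn_t smram_gfn = 0xc0;		/* hypothetical SMRAM page */

	printf("outside SMM: %s\n",
	       vcpu_gfn_to_memslot(&vcpu, smram_gfn) ? "visible" : "hidden");
	vcpu.in_smm = true;
	printf("inside SMM:  %s\n",
	       vcpu_gfn_to_memslot(&vcpu, smram_gfn) ? "visible" : "hidden");
	return 0;
}

With that picture in mind, the diffs below are mostly mechanical: every kvm_read_guest*, kvm_write_guest*, gfn_to_* and mark_page_dirty call that was keyed on struct kvm is rekeyed on the vcpu, so it goes through this selection step.
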
+1 −1
@@ -887,7 +887,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   struct kvm_memory_slot *slot,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
+31 −31
@@ -223,15 +223,15 @@ static unsigned int get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
-static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
+static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
 {
-	return kvm_memslots(kvm)->generation & MMIO_GEN_MASK;
+	return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
 }
 
-static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
+static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 			   unsigned access)
 {
-	unsigned int gen = kvm_current_mmio_generation(kvm);
+	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
@@ -258,22 +258,22 @@ static unsigned get_mmio_spte_access(u64 spte)
 	return (spte & ~mask) & ~PAGE_MASK;
 }
 
-static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			  pfn_t pfn, unsigned access)
 {
 	if (unlikely(is_noslot_pfn(pfn))) {
-		mark_mmio_spte(kvm, sptep, gfn, access);
+		mark_mmio_spte(vcpu, sptep, gfn, access);
 		return true;
 	}
 
 	return false;
 }
 
-static bool check_mmio_spte(struct kvm *kvm, u64 spte)
+static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 {
 	unsigned int kvm_gen, spte_gen;
 
-	kvm_gen = kvm_current_mmio_generation(kvm);
+	kvm_gen = kvm_current_mmio_generation(vcpu);
 	spte_gen = get_mmio_spte_generation(spte);
 
 	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
@@ -837,14 +837,14 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm->arch.indirect_shadow_pages--;
 }
 
-static int has_wrprotected_page(struct kvm *kvm,
+static int has_wrprotected_page(struct kvm_vcpu *vcpu,
 				gfn_t gfn,
 				int level)
 {
 	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 
-	slot = gfn_to_memslot(kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	if (slot) {
 		linfo = lpage_info_slot(gfn, slot, level);
 		return linfo->write_count;
@@ -876,7 +876,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
 	struct kvm_memory_slot *slot;
 
-	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
 	      (no_dirty_log && slot->dirty_bitmap))
 		slot = NULL;
@@ -901,7 +901,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
 	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
+		if (has_wrprotected_page(vcpu, large_gfn, level))
 			break;
 
 	return level - 1;
@@ -1336,18 +1336,18 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
+static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	int i;
 	bool write_protected = false;
 
-	slot = gfn_to_memslot(kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmapp = __gfn_to_rmap(gfn, i, slot);
-		write_protected |= __rmap_write_protect(kvm, rmapp, true);
+		write_protected |= __rmap_write_protect(vcpu->kvm, rmapp, true);
 	}
 
 	return write_protected;
@@ -2032,7 +2032,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 		bool protected = false;
 
 		for_each_sp(pages, sp, parents, i)
-			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
+			protected |= rmap_write_protect(vcpu, sp->gfn);
 
 		if (protected)
 			kvm_flush_remote_tlbs(vcpu->kvm);
@@ -2130,7 +2130,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	hlist_add_head(&sp->hash_link,
 		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
 	if (!direct) {
-		if (rmap_write_protect(vcpu->kvm, gfn))
+		if (rmap_write_protect(vcpu, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			kvm_sync_pages(vcpu, gfn);
@@ -2581,7 +2581,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	u64 spte;
 	int ret = 0;
 
-	if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
+	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
 	spte = PT_PRESENT_MASK;
@@ -2618,7 +2618,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		 * be fixed if guest refault.
 		 */
 		if (level > PT_PAGE_TABLE_LEVEL &&
-		    has_wrprotected_page(vcpu->kvm, gfn, level))
+		    has_wrprotected_page(vcpu, gfn, level))
 			goto done;
 
 		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2642,7 +2642,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 
 	if (pte_access & ACC_WRITE_MASK) {
-		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 		spte |= shadow_dirty_mask;
 	}
 
@@ -2860,7 +2860,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 		return 1;
 
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
-		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
+		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
 		return 0;
 	}
 
@@ -2883,7 +2883,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompound(pfn_to_page(pfn)) &&
-	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
+	    !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
 		 * mmu_notifier_retry was successful and we hold the
@@ -2975,7 +2975,7 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * Compare with set_spte where instead shadow_dirty_mask is set.
 	 */
 	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
-		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 
 	return true;
 }
@@ -3430,7 +3430,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 		gfn_t gfn = get_mmio_spte_gfn(spte);
 		unsigned access = get_mmio_spte_access(spte);
 
-		if (!check_mmio_spte(vcpu->kvm, spte))
+		if (!check_mmio_spte(vcpu, spte))
 			return RET_MMIO_PF_INVALID;
 
 		if (direct)
@@ -3502,7 +3502,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	arch.direct_map = vcpu->arch.mmu.direct_map;
 	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-	return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
+	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
@@ -3520,7 +3520,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	struct kvm_memory_slot *slot;
 	bool async;
 
-	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	async = false;
 	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
 	if (!async)
@@ -3633,7 +3633,7 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
-static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			   unsigned access, int *nr_present)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
@@ -3643,7 +3643,7 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
 		}
 
 		(*nr_present)++;
-		mark_mmio_spte(kvm, sptep, gfn, access);
+		mark_mmio_spte(vcpu, sptep, gfn, access);
 		return true;
 	}
 
@@ -4153,7 +4153,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
 		*gpa &= ~(gpa_t)7;
 		*bytes = 8;
-		r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
+		r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
 		if (r)
 			gentry = 0;
 		new = (const u8 *)&gentry;
@@ -4779,13 +4779,13 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }
 
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
 {
 	/*
 	 * The very rare case: if the generation-number is round,
 	 * zap all shadow pages.
 	 */
-	if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
+	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
 		printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
+1 −1
@@ -114,7 +114,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 		return;
 
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
-	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);
 
 	if (is_error_pfn(pfn))
 		return;
+9 −9
@@ -256,7 +256,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 		if (ret)
 			return ret;
 
-		mark_page_dirty(vcpu->kvm, table_gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
 		walker->ptes[level] = pte;
 	}
 	return 0;
@@ -338,7 +338,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 		real_gfn = gpa_to_gfn(real_gfn);
 
-		host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
+		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
 					    &walker->pte_writable[walker->level - 1]);
 		if (unlikely(kvm_is_error_hva(host_addr)))
 			goto error;
@@ -511,11 +511,11 @@ static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 		base_gpa = pte_gpa & ~mask;
 		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
 
-		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
+		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
 				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
 		curr_pte = gw->prefetch_ptes[index];
 	} else
-		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
+		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
 				  &curr_pte, sizeof(curr_pte));
 
 	return r || curr_pte != gw->ptes[level - 1];
@@ -869,7 +869,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			if (!rmap_can_add(vcpu))
 				break;
 
-			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 						       sizeof(pt_element_t)))
 				break;
 
@@ -956,7 +956,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					       sizeof(pt_element_t)))
 			return -EINVAL;
 
@@ -970,7 +970,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_access &= FNAME(gpte_access)(vcpu, gpte);
 		FNAME(protect_clean_gpte)(&pte_access, gpte);
 
-		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
+		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
 		      &nr_present))
 			continue;
 
+6 −6
@@ -1955,7 +1955,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
 	u64 pdpte;
 	int ret;
 
-	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
+	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
 				       offset_in_page(cr3) + index * 8, 8);
 	if (ret)
 		return 0;
@@ -2114,7 +2114,7 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
 
 	might_sleep();
 
-	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
+	page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
 	if (is_error_page(page))
 		goto error;
 
@@ -2153,7 +2153,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 	mask = (0xf >> (4 - size)) << start_bit;
 	val = 0;
 
-	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
 		return NESTED_EXIT_DONE;
 
 	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -2178,7 +2178,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	/* Offset is in 32 bit units but need in 8 bit units */
 	offset *= 4;
 
-	if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
 		return NESTED_EXIT_DONE;
 
 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -2449,7 +2449,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 		p      = msrpm_offsets[i];
 		offset = svm->nested.vmcb_msrpm + (p * 4);
 
-		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
 			return false;
 
 		svm->nested.msrpm[p] = svm->msrpm[p] | value;