
Commit 3520469d authored by Paolo Bonzini

KVM: export __gfn_to_pfn_memslot, drop gfn_to_pfn_async



gfn_to_pfn_async is used in just one place, and because of x86-specific
treatment that place will need to look at the memory slot.  Hence inline
it into try_async_pf and export __gfn_to_pfn_memslot.

The patch also switches the subsequent call to gfn_to_pfn_prot to use
__gfn_to_pfn_memslot.  This is a small optimization.  Finally, remove
the now-unused async argument of __gfn_to_pfn.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 69a12200
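
For orientation, here is a minimal sketch of the control flow try_async_pf ends up with after this patch, condensed from the first diff below. The async page-fault bookkeeping in the middle is elided, and the _sketch suffix is purely illustrative, not part of the patch.

#include <linux/kvm_host.h>

static bool try_async_pf_sketch(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
				gva_t gva, pfn_t *pfn, bool write, bool *writable)
{
	struct kvm_memory_slot *slot;
	bool async = false;

	/* Resolve the slot once; the x86 code will want to inspect it later. */
	slot = gfn_to_memslot(vcpu->kvm, gfn);

	/* First attempt: may report "would have blocked" through &async. */
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
	if (!async)
		return false;	/* *pfn already holds the result */

	/* ... the elided middle part uses prefault and gva to try queueing
	 *     an async page fault, exactly as in the real function ... */

	/* Fall back to a blocking lookup, reusing the same slot. */
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
	return false;
}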
+5 −4
@@ -3511,10 +3511,12 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
 {
+	struct kvm_memory_slot *slot;
 	bool async;
 
-	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
-
+	slot = gfn_to_memslot(vcpu->kvm, gfn);
+	async = false;
+	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
 	if (!async)
 		return false; /* *pfn has correct page already */
 
@@ -3528,8 +3530,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			return true;
 	}
 
-	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
-
+	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
 	return false;
 }
 
+2 −2
@@ -539,13 +539,13 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
-		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+			   bool *async, bool write_fault, bool *writable);
 
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
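
Because the declaration above is now public (and the definition in the next file gains an EXPORT_SYMBOL_GPL), code outside virt/kvm can reproduce what the dropped gfn_to_pfn_async did. A hedged sketch, not part of the patch; the wrapper name is illustrative only:

#include <linux/kvm_host.h>

/*
 * Illustrative only: the behaviour gfn_to_pfn_async used to provide,
 * expressed through the now-exported __gfn_to_pfn_memslot.
 */
static pfn_t gfn_to_pfn_async_equiv(struct kvm *kvm, gfn_t gfn, bool *async,
				    bool write_fault, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	*async = false;
	return __gfn_to_pfn_memslot(slot, gfn, false /* atomic */, async,
				    write_fault, writable);
}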
+8 −18
@@ -1355,8 +1355,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	return pfn;
 }
 
-static pfn_t
-__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
-		     bool *async, bool write_fault, bool *writable)
+pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
+			   bool *async, bool write_fault, bool *writable)
 {
 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
@@ -1376,44 +1375,35 @@ __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
 	return hva_to_pfn(addr, atomic, async, write_fault,
 			  writable);
 }
+EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
 
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic,
 			  bool write_fault, bool *writable)
 {
 	struct kvm_memory_slot *slot;
 
-	if (async)
-		*async = false;
-
 	slot = gfn_to_memslot(kvm, gfn);
 
-	return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
+	return __gfn_to_pfn_memslot(slot, gfn, atomic, NULL, write_fault,
 				    writable);
 }
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
+	return __gfn_to_pfn(kvm, gfn, true, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
-pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
-		       bool write_fault, bool *writable)
-{
-	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
-}
-EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
-
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
+	return __gfn_to_pfn(kvm, gfn, false, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 		      bool *writable)
 {
-	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+	return __gfn_to_pfn(kvm, gfn, false, write_fault, writable);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
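
The surviving wrappers keep their external signatures; only the internal plumbing changes, since __gfn_to_pfn no longer forwards an async pointer. As a sketch (not verbatim kernel code, the function name is illustrative), a gfn_to_pfn_prot() call now reduces to:

#include <linux/kvm_host.h>

/* Sketch of what gfn_to_pfn_prot(kvm, gfn, write_fault, writable) boils
 * down to after this patch: slot resolution plus a synchronous
 * (async == NULL), non-atomic __gfn_to_pfn_memslot lookup. */
static pfn_t gfn_to_pfn_prot_expanded(struct kvm *kvm, gfn_t gfn,
				      bool write_fault, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __gfn_to_pfn_memslot(slot, gfn, false /* atomic */,
				    NULL /* async */, write_fault, writable);
}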