Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e2e3738 authored by Anthony Liguori, committed by Avi Kivity
Browse files

KVM: Handle vma regions with no backing page



This patch allows VMAs that contain no backing page to be used for guest
memory.  This is useful for assigning mmio regions to a guest.

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent d2ebb410
Loading
Loading
Loading
Loading
+37 −12
Original line number Diff line number Diff line
@@ -532,6 +532,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

@@ -544,19 +545,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, addr);
		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

	return page_to_pfn(page[0]);
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		BUG_ON(pfn_valid(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

/*
 * gfn_to_page - translate a guest frame number to its backing struct page.
 * @kvm: the VM the gfn belongs to
 * @gfn: guest frame number to translate
 *
 * Returns the struct page backing @gfn with a reference held.  If the gfn
 * maps to a pfn with no struct page (e.g. an mmio region mapped via
 * VM_PFNMAP), there is no page to return; warn and hand back a reference
 * to bad_page so callers always receive a valid, refcounted page.
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (pfn_valid(pfn))
		return pfn_to_page(pfn);

	/* Reaching here means the pfn has no struct page; flag the caller. */
	WARN_ON(!pfn_valid(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);
@@ -569,6 +589,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);

/*
 * kvm_release_pfn_clean - drop a reference on a pfn without dirtying it.
 * @pfn: pfn previously obtained via gfn_to_pfn()
 *
 * Pfns without a backing struct page (e.g. mmio) carry no reference
 * count, so there is nothing to release for them.
 */
void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!pfn_valid(pfn))
		return;

	put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -594,20 +615,24 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

/*
 * kvm_set_pfn_dirty - mark the page backing @pfn as dirty.
 * @pfn: pfn previously obtained via gfn_to_pfn()
 *
 * A pfn without a struct page (e.g. mmio) has no dirty state to track,
 * and reserved pages are deliberately left untouched.
 */
void kvm_set_pfn_dirty(pfn_t pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!PageReserved(page))
		SetPageDirty(page);
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

/*
 * kvm_set_pfn_accessed - mark the page backing @pfn as recently used.
 * @pfn: pfn previously obtained via gfn_to_pfn()
 *
 * No-op for pfns with no struct page (e.g. mmio), which the page
 * reclaim machinery does not manage.
 */
void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!pfn_valid(pfn))
		return;

	mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

/*
 * kvm_get_pfn - take a reference on the page backing @pfn.
 * @pfn: pfn previously obtained via gfn_to_pfn()
 *
 * Pfns without a struct page (e.g. mmio) are not reference counted,
 * so there is nothing to pin for them.
 */
void kvm_get_pfn(pfn_t pfn)
{
	if (!pfn_valid(pfn))
		return;

	get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);