
Commit 6cede2e6 authored by Xiao Guangrong, committed by Avi Kivity

KVM: introduce KVM_ERR_PTR_BAD_PAGE



It is used to eliminate the overhead of a function call and to clean up
the code.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 9a592a95
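
The cleanup relies on the kernel's ERR_PTR()/IS_ERR() convention from include/linux/err.h: a small negative errno value is stored directly in the pointer itself (the top 4095 values of the address space are never valid kernel pointers), so a compile-time constant plus a static inline check can replace the exported get_bad_page() and is_error_page() functions. Below is a minimal, self-contained userspace sketch of that encoding; the helper definitions mirror include/linux/err.h, and main() is illustrative only, not part of this commit.

/*
 * Userspace sketch of the ERR_PTR()/IS_ERR() scheme this commit
 * builds on. The helpers mirror include/linux/err.h; main() is
 * for illustration only.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
/* Values in [-MAX_ERRNO, -1], seen as unsigned, are error codes. */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

/* The macro introduced by this commit */
#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

int main(void)
{
	void *page = KVM_ERR_PTR_BAD_PAGE;

	if (IS_ERR(page))
		printf("bad page, error %ld\n", PTR_ERR(page));
	return 0;
}

Because KVM_ERR_PTR_BAD_PAGE is a constant expression and is_error_page() is now inlined at each call site, callers no longer pay for a function call, and kvm_main.c no longer needs to export either symbol.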
include/linux/kvm_host.h +7 −2
@@ -68,6 +68,13 @@ static inline int is_invalid_pfn(pfn_t pfn)
 	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
 }
 
+#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
+
+static inline int is_error_page(struct page *page)
+{
+	return IS_ERR(page);
+}
+
 /*
  * vcpu->requests bit members
  */
@@ -409,7 +416,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
-int is_error_page(struct page *page);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
@@ -436,7 +442,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm);
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 			    int nr_pages);
 
-struct page *get_bad_page(void);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
virt/kvm/async_pf.c +1 −1
@@ -203,7 +203,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = get_bad_page();
+	work->page = KVM_ERR_PTR_BAD_PAGE;
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
virt/kvm/kvm_main.c +1 −12
@@ -922,17 +922,6 @@ void kvm_disable_largepages(void)
 }
 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
-int is_error_page(struct page *page)
-{
-	return IS_ERR(page);
-}
-EXPORT_SYMBOL_GPL(is_error_page);
-
-struct page *get_bad_page(void)
-{
-	return ERR_PTR(-ENOENT);
-}
-
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -1179,7 +1168,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	WARN_ON(kvm_is_mmio_pfn(pfn));
 
 	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
-		return get_bad_page();
+		return KVM_ERR_PTR_BAD_PAGE;
 
 	return pfn_to_page(pfn);
 }