
Commit 2fc84311 authored by Xiao Guangrong, committed by Avi Kivity

KVM: reorganize hva_to_pfn



We do too many things in hva_to_pfn; this patch reorganizes the code
to make it more readable.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 86ab8cff
+97 −62
@@ -1041,25 +1041,41 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 	return rc == -EHWPOISON;
 }
 
-static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
-			bool write_fault, bool *writable)
+/*
+ * The atomic path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.
+ */
+static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
+			    bool write_fault, bool *writable, pfn_t *pfn)
 {
 	struct page *page[1];
-	int npages = 0;
-	pfn_t pfn;
+	int npages;
 
-	/* we can do it either atomically or asynchronously, not both */
-	BUG_ON(atomic && async);
+	if (!(async || atomic))
+		return false;
 
-	BUG_ON(!write_fault && !writable);
+	npages = __get_user_pages_fast(addr, 1, 1, page);
+	if (npages == 1) {
+		*pfn = page_to_pfn(page[0]);
 
 		if (writable)
 			*writable = true;
+		return true;
+	}
 
-	if (atomic || async)
-		npages = __get_user_pages_fast(addr, 1, 1, page);
+	return false;
+}
+
+/*
+ * The slow path to get the pfn of the specified host virtual address,
+ * 1 indicates success, -errno is returned if error is detected.
+ */
+static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
+			   bool *writable, pfn_t *pfn)
+{
+	struct page *page[1];
+	int npages = 0;
 
-	if (unlikely(npages != 1) && !atomic) {
 	might_sleep();
 
 	if (writable)
@@ -1073,9 +1089,11 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	} else
 		npages = get_user_pages_fast(addr, 1, write_fault,
 					     page);
+	if (npages != 1)
+		return npages;
 
 	/* map read fault as writable if possible */
-	if (unlikely(!write_fault) && npages == 1) {
+	if (unlikely(!write_fault)) {
 		struct page *wpage[1];
 
 		npages = __get_user_pages_fast(addr, 1, 1, wpage);
@@ -1084,21 +1102,40 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 			put_page(page[0]);
 			page[0] = wpage[0];
 		}
+
 		npages = 1;
 	}
+	*pfn = page_to_pfn(page[0]);
+	return npages;
 }
 
-	if (unlikely(npages != 1)) {
+static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+			bool write_fault, bool *writable)
+{
 	struct vm_area_struct *vma;
+	pfn_t pfn = 0;
+	int npages;
+
+	/* we can do it either atomically or asynchronously, not both */
+	BUG_ON(atomic && async);
+
+	BUG_ON(!write_fault && !writable);
+
+	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+		return pfn;
 
 	if (atomic)
 		return KVM_PFN_ERR_FAULT;
 
+	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
+	if (npages == 1)
+		return pfn;
+
 	down_read(&current->mm->mmap_sem);
 	if (npages == -EHWPOISON ||
 	      (!async && check_user_page_hwpoison(addr))) {
-		up_read(&current->mm->mmap_sem);
-		return KVM_PFN_ERR_HWPOISON;
+		pfn = KVM_PFN_ERR_HWPOISON;
+		goto exit;
 	}
 
 	vma = find_vma_intersection(current->mm, addr, addr + 1);
@@ -1114,10 +1151,8 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		if (async && (vma->vm_flags & VM_WRITE))
 			*async = true;
 		pfn = KVM_PFN_ERR_FAULT;
 	}
+exit:
 	up_read(&current->mm->mmap_sem);
-	} else
-		pfn = page_to_pfn(page[0]);
-
 	return pfn;
 }
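
For reference, the control flow that the patch gives hva_to_pfn can be summarized as a minimal standalone sketch: a lockless fast path, an early bail-out for atomic callers, a sleeping slow path, and a final fallback. This is not kernel code; try_fast, do_slow, and do_fallback are hypothetical stand-ins for hva_to_pfn_fast, hva_to_pfn_slow, and the mmap_sem-protected VMA fallback, with all locking and error detail elided.

/* Simplified userspace model of the reorganized hva_to_pfn().
 * Build with: cc -o hva_flow hva_flow.c
 */
#include <stdbool.h>
#include <stdio.h>

#define ERR_FAULT (~0UL)	/* stands in for KVM_PFN_ERR_FAULT */

/* stands in for hva_to_pfn_fast(): lockless attempt, true on success */
static bool try_fast(unsigned long addr, unsigned long *pfn)
{
	(void)addr;
	(void)pfn;
	return false;		/* pretend the fast path missed */
}

/* stands in for hva_to_pfn_slow(): may sleep, 1 on success, -errno on error */
static int do_slow(unsigned long addr, unsigned long *pfn)
{
	*pfn = addr >> 12;	/* pretend the page was resolved */
	return 1;
}

/* stands in for the VMA fallback (MMIO/VM_PFNMAP handling) */
static unsigned long do_fallback(unsigned long addr)
{
	(void)addr;
	return ERR_FAULT;
}

/* mirrors the reorganized hva_to_pfn(): each stage is now a single call */
static unsigned long resolve(unsigned long addr, bool atomic)
{
	unsigned long pfn;

	if (try_fast(addr, &pfn))
		return pfn;		/* fast path hit */

	if (atomic)
		return ERR_FAULT;	/* atomic callers must not sleep */

	if (do_slow(addr, &pfn) == 1)
		return pfn;		/* slow path hit */

	return do_fallback(addr);	/* last resort */
}

int main(void)
{
	printf("pfn=%#lx\n", resolve(0x7f0000001000UL, false));
	return 0;
}

The design point the sketch illustrates is that the old function interleaved all three stages in one body behind nested npages checks; after the patch each stage has a single, named entry point and hva_to_pfn reduces to a sequence of early returns.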