Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a0ed4607 authored by Takuya Yoshikawa, committed by Avi Kivity
Browse files

KVM: MMU: Split the main body of rmap_write_protect() off from others



We will use this in the following patch to implement another function
which needs to write protect pages using the rmap information.

Note that there is a small change in debug printing for large pages:
we do not differentiate them from others to avoid duplicating code.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 24899709
Loading
Loading
Loading
Loading
+27 −26
Original line number Diff line number Diff line
@@ -1010,42 +1010,43 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
		rmap_remove(kvm, sptep);
}

int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
			       struct kvm_memory_slot *slot)
static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
{
	unsigned long *rmapp;
	u64 *spte;
	int i, write_protected = 0;
	u64 *spte = NULL;
	int write_protected = 0;

	rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
	spte = rmap_next(rmapp, NULL);
	while (spte) {
	while ((spte = rmap_next(rmapp, spte))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writable_pte(*spte)) {
			mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(rmapp, spte);
	}

	/* check for huge page mappings */
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = __gfn_to_rmap(gfn, i, slot);
		spte = rmap_next(rmapp, NULL);
		while (spte) {
			BUG_ON(!(*spte & PT_PRESENT_MASK));
		if (!is_writable_pte(*spte))
			continue;

		if (level == PT_PAGE_TABLE_LEVEL) {
			mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
		} else {
			BUG_ON(!is_large_pte(*spte));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
			if (is_writable_pte(*spte)) {
			drop_spte(kvm, spte);
			--kvm->stat.lpages;
			spte = NULL;
		}

		write_protected = 1;
	}
			spte = rmap_next(rmapp, spte);

	return write_protected;
}

int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
			       struct kvm_memory_slot *slot)
{
	unsigned long *rmapp;
	int i, write_protected = 0;

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmapp, i);
	}

	return write_protected;