Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d89cc617 authored by Takuya Yoshikawa's avatar Takuya Yoshikawa Committed by Avi Kivity
Browse files

KVM: Push rmap into kvm_arch_memory_slot



Two reasons:
 - x86 can integrate rmap and rmap_pde and remove heuristics in
   __gfn_to_rmap().
 - Some architectures do not need rmap.

Since rmap is one of the most memory-consuming structures in KVM, ppc had
better restrict the allocation to Book3S HV.

Signed-off-by: default avatarTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Acked-by: default avatarPaul Mackerras <paulus@samba.org>
Signed-off-by: default avatarAvi Kivity <avi@redhat.com>
parent 65fbe37c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -221,6 +221,7 @@ struct revmap_entry {
#define KVMPPC_GOT_PAGE		0x80

/* Per-memslot arch-private data; rmap is only populated for Book3S HV. */
struct kvm_arch_memory_slot {
	unsigned long *rmap;	/* one reverse-map word per guest page in the slot */
};

struct kvm_arch {
+3 −3
Original line number Diff line number Diff line
@@ -705,7 +705,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->rmap[gfn - memslot->base_gfn];
	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
@@ -788,7 +788,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}
@@ -1036,7 +1036,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
	unsigned long *rmapp, *map;

	preempt_disable();
	rmapp = memslot->rmap;
	rmapp = memslot->arch.rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
+2 −2
Original line number Diff line number Diff line
@@ -84,7 +84,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
@@ -180,7 +180,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
+8 −0
Original line number Diff line number Diff line
@@ -302,10 +302,18 @@ long kvm_arch_dev_ioctl(struct file *filp,
/*
 * Release the arch-private rmap array of @free, unless @dont shares the
 * same array (in which case ownership stays with @dont).
 */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	/* Shared with @dont: nothing to release here. */
	if (dont && free->arch.rmap == dont->arch.rmap)
		return;

	vfree(free->arch.rmap);
	free->arch.rmap = NULL;	/* defend against later double-free */
}

/*
 * Allocate the zero-filled rmap array for a new memslot: one unsigned long
 * per guest page.  Returns 0 on success, -ENOMEM on allocation failure or
 * if the requested size would overflow.
 */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	/* Guard against npages * sizeof() wrapping on 32-bit builds. */
	if (npages > ULONG_MAX / sizeof(*slot->arch.rmap))
		return -ENOMEM;

	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

+1 −1
Original line number Diff line number Diff line
@@ -504,7 +504,7 @@ struct kvm_lpage_info {
};

struct kvm_arch_memory_slot {
	unsigned long *rmap_pde[KVM_NR_PAGE_SIZES - 1];
	unsigned long *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

Loading