
Commit 3d089f84 authored by David Gibson, committed by Paul Mackerras

KVM: PPC: Book3S HV: Don't store values derivable from HPT order



Currently the kvm_hpt_info structure stores the hashed page table's order,
and also the number of HPTEs it contains and a mask for its size.  The
last two can be easily derived from the order, so remove them and just
calculate them as necessary with a couple of helper inlines.
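
For illustration only (not part of the patch): a minimal user-space sketch of the derivation the new helpers perform, assuming an example order of 28 (a 256 MiB HPT). Both values follow from the order alone, which is why npte and mask no longer need to be stored.

#include <stdio.h>

int main(void)
{
	unsigned int order = 28;			/* example only: HPT size is 2**order bytes */
	unsigned long npte = 1UL << (order - 4);	/* HPTEs are 2**4 = 16 bytes each */
	unsigned long mask = (1UL << (order - 7)) - 1;	/* 2**7 = 128 bytes per HPTEG */

	printf("npte = %lu, hash mask = 0x%lx\n", npte, mask);
	return 0;
}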

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 3f9d4f5a
arch/powerpc/include/asm/kvm_book3s_64.h +12 −0
@@ -356,6 +356,18 @@ extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

+static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
+{
+	/* HPTEs are 2**4 bytes long */
+	return 1UL << (hpt->order - 4);
+}
+
+static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
+{
+	/* 128 (2**7) bytes in each HPTEG */
+	return (1UL << (hpt->order - 7)) - 1;
+}
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */
arch/powerpc/include/asm/kvm_host.h +0 −2
@@ -246,8 +246,6 @@ struct kvm_hpt_info {
	unsigned long virt;
	/* Array of reverse mapping entries for each guest HPTE */
	struct revmap_entry *rev;
-	unsigned long npte;
-	unsigned long mask;
	/* Guest HPT size is 2**(order) bytes */
	u32 order;
	/* 1 if HPT allocated with CMA, 0 otherwise */
arch/powerpc/kvm/book3s_64_mmu_hv.c +13 −15
@@ -83,15 +83,11 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)

	kvm->arch.hpt.virt = hpt;
	kvm->arch.hpt.order = order;
-	/* HPTEs are 2**4 bytes long */
-	kvm->arch.hpt.npte = 1ul << (order - 4);
-	/* 128 (2**7) bytes in each HPTEG */
-	kvm->arch.hpt.mask = (1ul << (order - 7)) - 1;

	atomic64_set(&kvm->arch.mmio_update, 0);

	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt.npte);
+	rev = vmalloc(sizeof(struct revmap_entry) * kvmppc_hpt_npte(&kvm->arch.hpt));
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
@@ -196,8 +192,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
-	if (npages > kvm->arch.hpt.mask + 1)
-		npages = kvm->arch.hpt.mask + 1;
+	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
+		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -207,7 +203,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
-		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt.mask;
+		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
+			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
@@ -1327,7 +1324,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
-			while (i < kvm->arch.hpt.npte &&
+			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
@@ -1337,7 +1334,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
		hdr.index = i;

		/* Grab a series of valid entries */
-		while (i < kvm->arch.hpt.npte &&
+		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
@@ -1353,7 +1350,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			++revp;
		}
		/* Now skip invalid entries while we can */
-		while (i < kvm->arch.hpt.npte &&
+		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
@@ -1374,7 +1371,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
		}

		/* Check if we've wrapped around the hash table */
-		if (i >= kvm->arch.hpt.npte) {
+		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
			i = 0;
			ctx->first_pass = 0;
			break;
@@ -1433,8 +1430,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,

		err = -EINVAL;
		i = hdr.index;
-		if (i >= kvm->arch.hpt.npte ||
-		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt.npte)
+		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
+		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
			break;

		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
@@ -1625,7 +1622,8 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
	kvm = p->kvm;
	i = p->hpt_index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
-	for (; len != 0 && i < kvm->arch.hpt.npte; ++i, hptp += 2) {
+	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
+	     ++i, hptp += 2) {
		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

arch/powerpc/kvm/book3s_hv_rm_mmu.c +9 −9
@@ -292,7 +292,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,

	/* Find and lock the HPTEG slot to use */
 do_insert:
-	if (pte_index >= kvm->arch.hpt.npte)
+	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
@@ -469,7 +469,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt.npte)
+	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -557,7 +557,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
				break;
			}
			if (req != 1 || flags == 3 ||
-			    pte_index >= kvm->arch.hpt.npte) {
+			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
@@ -657,7 +657,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt.npte)
+	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
@@ -728,7 +728,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt.npte)
+	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
@@ -769,7 +769,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt.npte)
+	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
@@ -817,7 +817,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
-	if (pte_index >= kvm->arch.hpt.npte)
+	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
@@ -970,7 +970,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
-	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt.mask;
+	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

@@ -1017,7 +1017,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
-		hash = hash ^ kvm->arch.hpt.mask;
+		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}