Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83aae4a8 authored by Hollis Blanchard, committed by Avi Kivity
Browse files

KVM: ppc: Write only modified shadow entries into the TLB on exit



Track which TLB entries need to be written, instead of overwriting everything
below the high water mark. Typically only a single guest TLB entry will be
modified in a single exit.

Guest boot time performance improvement: about 15%.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 20754c24
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -82,6 +82,9 @@ struct kvm_vcpu_arch {
	/* Pages which are referenced in the shadow TLB. */
	struct page *shadow_pages[PPC44x_TLB_SIZE];

	/* Track which TLB entries we've modified in the current exit. */
	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];

	u32 host_stack;
	u32 host_pid;
	u32 host_dbcr0;
+3 −0
Original line number Diff line number Diff line
@@ -65,6 +65,9 @@ extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                  gva_t eend, u32 asid);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);

/* XXX Book E specific */
extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);

extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);

static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
+1 −0
Original line number Diff line number Diff line
@@ -357,6 +357,7 @@ int main(void)
	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
	DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
	DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+8 −1
Original line number Diff line number Diff line
@@ -125,6 +125,11 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
	}
}

/* Mark shadow TLB entry @i as modified so the lightweight-exit path
 * (booke_interrupts.S) rewrites only this entry into the hardware TLB
 * instead of reloading everything below the high water mark.
 * The flag is consumed and cleared by the assembly exit path. */
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{
    vcpu->arch.shadow_tlb_mod[i] = 1;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
@@ -172,10 +177,10 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
	 * use host large pages in the future. */
	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
	               | PPC44x_TLB_4K;

	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                            vcpu->arch.msr & MSR_PR);
	kvmppc_tlbe_set_modified(vcpu, victim);

	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
			stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
@@ -209,6 +214,7 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;
		kvmppc_tlbe_set_modified(vcpu, i);
		KVMTRACE_5D(STLB_INVAL, vcpu, i,
				stlbe->tid, stlbe->word0, stlbe->word1,
				stlbe->word2, handler);
@@ -229,6 +235,7 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;
		kvmppc_tlbe_set_modified(vcpu, i);
		KVMTRACE_5D(STLB_INVAL, vcpu, i,
				stlbe->tid, stlbe->word0, stlbe->word1,
				stlbe->word2, handler);
+34 −17
Original line number Diff line number Diff line
@@ -335,7 +335,7 @@ lightweight_exit:
	lwz	r3, VCPU_PID(r4)
	mtspr	SPRN_PID, r3

	/* Prevent all TLB updates. */
	/* Prevent all asynchronous TLB updates. */
	mfmsr	r5
	lis	r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
	ori	r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
@@ -344,28 +344,45 @@ lightweight_exit:

	/* Load the guest mappings, leaving the host's "pinned" kernel mappings
	 * in place. */
	/* XXX optimization: load only modified guest entries. */
	mfspr	r10, SPRN_MMUCR			/* Save host MMUCR. */
	lis	r8, tlb_44x_hwater@ha
	lwz	r8, tlb_44x_hwater@l(r8)
	addi	r9, r4, VCPU_SHADOW_TLB - 4
	li	r6, 0
	li	r5, PPC44x_TLB_SIZE
	lis	r5, tlb_44x_hwater@ha
	lwz	r5, tlb_44x_hwater@l(r5)
	mtctr	r5
	addi	r9, r4, VCPU_SHADOW_TLB
	addi	r5, r4, VCPU_SHADOW_MOD
	li	r3, 0
1:
	lbzx	r7, r3, r5
	cmpwi	r7, 0
	beq	3f

	/* Load guest entry. */
	lwzu	r7, 4(r9)
	mulli	r11, r3, TLBE_BYTES
	add	r11, r11, r9
	lwz	r7, 0(r11)
	mtspr	SPRN_MMUCR, r7
	lwzu	r7, 4(r9)
	tlbwe	r7, r6, PPC44x_TLB_PAGEID
	lwzu	r7, 4(r9)
	tlbwe	r7, r6, PPC44x_TLB_XLAT
	lwzu	r7, 4(r9)
	tlbwe	r7, r6, PPC44x_TLB_ATTRIB
	/* Increment index. */
	addi	r6, r6, 1
	cmpw	r6, r8
	blt	1b
	lwz	r7, 4(r11)
	tlbwe	r7, r3, PPC44x_TLB_PAGEID
	lwz	r7, 8(r11)
	tlbwe	r7, r3, PPC44x_TLB_XLAT
	lwz	r7, 12(r11)
	tlbwe	r7, r3, PPC44x_TLB_ATTRIB
3:
	addi	r3, r3, 1                       /* Increment index. */
	bdnz	1b

	mtspr	SPRN_MMUCR, r10			/* Restore host MMUCR. */

	/* Clear bitmap of modified TLB entries */
	li	r5, PPC44x_TLB_SIZE>>2
	mtctr	r5
	addi	r5, r4, VCPU_SHADOW_MOD - 4
	li	r6, 0
1:
	stwu	r6, 4(r5)
	bdnz	1b

	iccci	0, 0 /* XXX hack */

	/* Load some guest volatiles. */
Loading