Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8e5b26b5 authored by Alexander Graf, committed by Marcelo Tosatti
Browse files

KVM: PPC: Use accessor functions for GPR access



All code in PPC KVM currently accesses gprs in the vcpu struct directly.

While there's nothing wrong with that wrt the current way gprs are stored
and loaded, it doesn't suffice for the PACA acceleration that will follow
in this patchset.

So let's just create little wrapper inline functions that we call whenever
a GPR needs to be read from or written to. The compiled code shouldn't really
change at all for now.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 0d178975
Loading
Loading
Loading
Loading
+26 −0
Original line number Diff line number Diff line
@@ -96,4 +96,30 @@ extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);

#ifdef CONFIG_PPC_BOOK3S

/*
 * GPR accessor wrappers.
 *
 * NOTE: both branches of this #ifdef are deliberately identical at this
 * point in the series. The wrappers exist so that a follow-up patch can
 * redirect Book3S GPR accesses (e.g. into the PACA) without touching
 * every call site — see the commit message: "The compiled code shouldn't
 * really change at all for now."
 */

/* Write guest GPR @num in the vcpu struct. */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

/* Read guest GPR @num from the vcpu struct. */
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

#else

/* Non-Book3S: GPRs always live directly in vcpu->arch.gpr[]. */

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gpr[num];
}

#endif

#endif /* __POWERPC_KVM_PPC_H__ */
+13 −12
Original line number Diff line number Diff line
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			 */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
				break;
			case DCRN_CPR0_CONFIG_DATA:
				local_irq_disable();
				mtdcr(DCRN_CPR0_CONFIG_ADDR,
					  vcpu->arch.cpr0_cfgaddr);
				vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
				kvmppc_set_gpr(vcpu, rt,
					       mfdcr(DCRN_CPR0_CONFIG_DATA));
				local_irq_enable();
				break;
			default:
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			/* emulate some access in kernel */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
				vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = vcpu->arch.gpr[rs];
				run->dcr.data = kvmppc_get_gpr(vcpu, rs);
				run->dcr.is_write = 1;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)

	switch (sprn) {
	case SPRN_PID:
		kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
		kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
	case SPRN_MMUCR:
		vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
		vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
	case SPRN_CCR0:
		vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
		vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
	case SPRN_CCR1:
		vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
		vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
	}
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)

	switch (sprn) {
	case SPRN_PID:
		vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
	case SPRN_MMUCR:
		vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
	case SPRN_CCR0:
		vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
	case SPRN_CCR1:
		vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
	}
+7 −7
Original line number Diff line number Diff line
@@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = vcpu->arch.gpr[ra];
	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = vcpu->arch.gpr[rs];
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = vcpu->arch.gpr[rs];
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = vcpu->arch.gpr[rs];
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
@@ -500,9 +500,9 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = vcpu->arch.gpr[rb];
	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += vcpu->arch.gpr[ra];
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
	if (rc) {
@@ -511,7 +511,7 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
		else
			vcpu->arch.cr |= 0x20000000;
	}
	vcpu->arch.gpr[rt] = gtlb_index;
	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
+4 −4
Original line number Diff line number Diff line
@@ -658,7 +658,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	}
	case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
		printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
#endif
		vcpu->stat.syscall_exits++;
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
@@ -734,7 +734,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	regs->sprg7 = vcpu->arch.sprg6;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
@@ -759,8 +759,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	vcpu->arch.sprg6 = regs->sprg5;
	vcpu->arch.sprg7 = regs->sprg6;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];
	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
+40 −37
Original line number Diff line number Diff line
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
			kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs = vcpu->arch.gpr[get_rs(inst)];
			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
			if (inst & 0x10000) {
				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
			break;
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				vcpu->arch.gpr[get_rt(inst)] = sr;
				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
			}
			break;
		}
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
				vcpu->arch.gpr[get_rs(inst)]);
				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, get_rs(inst)));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = vcpu->arch.gpr[get_rb(inst)];
			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
						vcpu->arch.gpr[get_rb(inst)]);
			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, get_rs(inst)),
					kvmppc_get_gpr(vcpu, get_rb(inst)));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, get_rb(inst)));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			} else {
				ulong t, rb;

				rb = vcpu->arch.gpr[get_rb(inst)];
				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
				vcpu->arch.gpr[get_rt(inst)] = t;
				kvmppc_set_gpr(vcpu, get_rt(inst), t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			} else {
				ulong t, rb;

				rb = vcpu->arch.gpr[get_rb(inst)];
				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
				vcpu->arch.gpr[get_rt(inst)] = t;
				kvmppc_set_gpr(vcpu, get_rt(inst), t);
			}
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb =  vcpu->arch.gpr[get_rb(inst)];
			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
			ulong ra = 0;
			ulong addr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

			if (get_ra(inst))
				ra = vcpu->arch.gpr[get_ra(inst)];
				ra = kvmppc_get_gpr(vcpu, get_ra(inst));

			addr = (ra + rb) & ~31ULL;
			if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SDR1:
		to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->dsisr = spr_val;
		break;
	case SPRN_DAR:
		vcpu->arch.dear = vcpu->arch.gpr[rs];
		vcpu->arch.dear = spr_val;
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
		kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		break;
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID4:
		to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)

	switch (sprn) {
	case SPRN_SDR1:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
		break;
	case SPRN_DSISR:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
		break;
	case SPRN_DAR:
		vcpu->arch.gpr[rt] = vcpu->arch.dear;
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
		break;
	case SPRN_HIOR:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
		break;
	case SPRN_HID0:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
		break;
	case SPRN_HID1:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
		break;
	case SPRN_HID2:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
		break;
	case SPRN_HID4:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
		break;
	case SPRN_HID5:
		vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
		break;
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
		vcpu->arch.gpr[rt] = 0;
		kvmppc_set_gpr(vcpu, rt, 0);
		break;
	default:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
Loading