
Commit 468a12c2 authored by Alexander Graf, committed by Avi Kivity

KVM: PPC: Use get/set for to_svcpu to help preemption



When running the 64-bit Book3s PR code without CONFIG_PREEMPT_NONE, we were
doing a few things wrong, most notably accessing PACA fields without making
sure that the pointers stay stable across the access (preempt_disable()).

This patch moves to_svcpu to a get/put model, which allows us to disable
preemption while accessing the shadow vcpu fields in the PACA. That way we
can run preemptible and everyone's happy!

Reported-by: Jörg Sommer <joerg@alea.gnuu.de>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent d33ad328
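
On 64-bit Book3S PR the shadow vcpu lives in the per-CPU PACA, so a pointer obtained through the old to_svcpu() is only valid while the task stays on that CPU. The sketch below condenses the hazard and the fix; both functions are hypothetical callers, not code from this commit.

/* Before: to_svcpu() returned &get_paca()->shadow_vcpu with preemption
 * still enabled. If the task was preempted and migrated between taking
 * the pointer and using it, the pointer referred to the old CPU's PACA. */
ulong broken_get_pc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
	/* preemption + migration here leaves svcpu stale */
	return svcpu->pc;
}

/* After: the get/put bracket pins the task to one CPU for the access. */
ulong fixed_get_pc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); /* preempt_disable() on 64-bit */
	ulong pc = svcpu->pc;	/* copy out while the pointer is stable */
	svcpu_put(svcpu);	/* preempt_enable() on 64-bit */
	return pc;
}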
arch/powerpc/include/asm/kvm_book3s.h  (+59 −17)
@@ -183,7 +183,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	if ( num < 14 ) {
-		to_svcpu(vcpu)->gpr[num] = val;
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		svcpu->gpr[num] = val;
+		svcpu_put(svcpu);
		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
	} else
		vcpu->arch.gpr[num] = val;
@@ -191,80 +193,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
-	if ( num < 14 )
-		return to_svcpu(vcpu)->gpr[num];
-	else
+	if ( num < 14 ) {
+		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+		ulong r = svcpu->gpr[num];
+		svcpu_put(svcpu);
+		return r;
+	} else
		return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
-	to_svcpu(vcpu)->cr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->cr = val;
+	svcpu_put(svcpu);
	to_book3s(vcpu)->shadow_vcpu->cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
-	return to_svcpu(vcpu)->cr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->cr;
+	svcpu_put(svcpu);
+	return r;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
-	to_svcpu(vcpu)->xer = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->xer = val;
	to_book3s(vcpu)->shadow_vcpu->xer = val;
+	svcpu_put(svcpu);
}

static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
-	return to_svcpu(vcpu)->xer;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+	r = svcpu->xer;
+	svcpu_put(svcpu);
+	return r;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
-	to_svcpu(vcpu)->ctr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->ctr = val;
+	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
-	return to_svcpu(vcpu)->ctr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->ctr;
+	svcpu_put(svcpu);
+	return r;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
-	to_svcpu(vcpu)->lr = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->lr = val;
+	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
-	return to_svcpu(vcpu)->lr;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->lr;
+	svcpu_put(svcpu);
+	return r;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
-	to_svcpu(vcpu)->pc = val;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->pc = val;
+	svcpu_put(svcpu);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
-	return to_svcpu(vcpu)->pc;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->pc;
+	svcpu_put(svcpu);
+	return r;
}

static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);

-	return svcpu->last_inst;
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
-	return to_svcpu(vcpu)->fault_dar;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong r;
+	r = svcpu->fault_dar;
+	svcpu_put(svcpu);
+	return r;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
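
None of the rewritten accessors let the svcpu pointer escape: the value is copied to a local inside the bracket and only the copy is returned. A caller that needs several shadow fields should take one bracket rather than one per accessor, which is what the MMU host code further down does. A hypothetical sketch, not part of the patch:

/* Snapshot two shadow registers under a single get/put bracket. */
static void demo_snapshot_regs(struct kvm_vcpu *vcpu, ulong *pc, ulong *lr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	*pc = svcpu->pc;	/* copies made while the pointer is pinned */
	*lr = svcpu->lr;
	svcpu_put(svcpu);	/* svcpu must not be dereferenced past this point */
}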
arch/powerpc/include/asm/kvm_book3s_32.h  (+5 −1)
@@ -20,11 +20,15 @@
#ifndef __ASM_KVM_BOOK3S_32_H__
#define __ASM_KVM_BOOK3S_32_H__

-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	return to_book3s(vcpu)->shadow_vcpu;
}

+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+}

#define PTE_SIZE	12
#define VSID_ALL	0
#define SR_INVALID	0x00000001	/* VSID 1 should always be unused */
arch/powerpc/include/asm/kvm_book3s_64.h  (+7 −1)
@@ -21,10 +21,16 @@
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
+	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+	preempt_enable();
+}
#endif

#define SPAPR_TCE_SHIFT		12
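
The two headers above give the same API different costs. On 32-bit the shadow vcpu is embedded in the heap-allocated vcpu_book3s, so the pointer is stable regardless of CPU and svcpu_put() compiles to nothing; the empty function exists only to keep callers uniform. On 64-bit the shadow vcpu sits in the per-CPU PACA, so the bracket must pin the task. Inlined, a 64-bit read accessor reduces to roughly the following sketch (hand-expanded for illustration, not compiler output):

ulong expanded_get_pc(struct kvm_vcpu *vcpu)
{
	ulong r;

	preempt_disable();		/* svcpu_get(): no migration from here on */
	r = get_paca()->shadow_vcpu.pc;	/* PACA of the CPU we are pinned to */
	preempt_enable();		/* svcpu_put(): the scheduler may move us again */
	return r;
}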
arch/powerpc/kvm/book3s_32_mmu_host.c  (+15 −6)
@@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
+	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
				 orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

@@ -249,7 +251,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

-	return 0;
+out:
+	return r;
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
-		return -ENOENT;
+		r = -ENOENT;
+		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
@@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

+	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
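
The changes in this file follow one mechanical rule: once a function has called svcpu_get(), every early return becomes r = ...; goto out; so that a single exit path drops the reference. A condensed sketch of the shape; the function and some_check() are hypothetical stand-ins for the real lookups:

static int demo_single_exit(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (some_check(vcpu, esid)) {		/* hypothetical failure path */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;			/* still reaches svcpu_put() */
	}

	/* ... main work on svcpu ... */

out:
	svcpu_put(svcpu);			/* runs on every path */
	return r;
}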
arch/powerpc/kvm/book3s_64_mmu_host.c  (+43 −23)
@@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
+	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
	}
	hpaddr <<= PAGE_SHIFT;
	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
@@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
	}

	vsid = map->host_vsid;
@@ -131,8 +134,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
-		if (ppc_md.hpte_remove(hpteg) < 0)
-			return -1;
+		if (ppc_md.hpte_remove(hpteg) < 0) {
+			r = -1;
+			goto out;
+		}

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);

@@ -162,7 +167,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
		kvmppc_mmu_hpte_cache_map(vcpu, pte);
	}

-	return 0;
+out:
+	return r;
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)

static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
+	int r;

-	if (!to_svcpu(vcpu)->slb_max)
-		to_svcpu(vcpu)->slb_max = 1;
+	if (!svcpu->slb_max)
+		svcpu->slb_max = 1;

	/* Are we overwriting? */
-	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
-		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
+	for (i = 1; i < svcpu->slb_max; i++) {
+		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
-		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
-			return i;
+		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
+			r = i;
+			goto out;
+		}
	}

	/* Found a spare entry that was invalidated before */
-	if (found_inval > 0)
-		return found_inval;
+	if (found_inval > 0) {
+		r = found_inval;
+		goto out;
+	}

	/* No spare invalid entry, so create one */

@@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
-	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
+	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

-	r = to_svcpu(vcpu)->slb_max;
-	to_svcpu(vcpu)->slb_max++;
+	r = svcpu->slb_max;
+	svcpu->slb_max++;

+out:
+	svcpu_put(svcpu);
	return r;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
+	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
-		to_svcpu(vcpu)->slb[slb_index].esid = 0;
-		return -ENOENT;
+		svcpu->slb[slb_index].esid = 0;
+		r = -ENOENT;
+		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
@@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

-	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
-	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
+	svcpu->slb[slb_index].esid = slb_esid;
+	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
-	to_svcpu(vcpu)->slb_max = 1;
-	to_svcpu(vcpu)->slb[0].esid = 0;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	svcpu->slb_max = 1;
+	svcpu->slb[0].esid = 0;
+	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
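
One subtlety in this file: kvmppc_mmu_map_segment() holds an svcpu reference while calling kvmppc_mmu_next_segment(), which takes and drops its own. That is safe because preempt_disable() and preempt_enable() nest through the preempt count. A hypothetical demonstration, not code from the patch:

void demo_nested_brackets(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *outer = svcpu_get(vcpu); /* count 0 -> 1 */
	struct kvmppc_book3s_shadow_vcpu *inner = svcpu_get(vcpu); /* count 1 -> 2 */

	/* No migration can occur between the two gets, so on 64-bit both
	 * point into the same CPU's PACA. */
	WARN_ON(inner != outer);

	svcpu_put(inner);	/* count 2 -> 1: still pinned */
	svcpu_put(outer);	/* count 1 -> 0: preemption allowed again */
}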