
Commit 3cd1d327 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "PPC:
   - fix bug leading to lost IPIs and smp_call_function_many() lockups
     on POWER9

  ARM:
   - locking fix
   - reset fix
   - GICv2 multi-source SGI injection fix
   - GICv2-on-v3 MMIO synchronization fix
   - make the console less verbose.

  x86:
   - fix device passthrough on AMD SME"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix device passthrough when SME is active
  kvm: arm/arm64: vgic-v3: Tighten synchronization for guests using v2 on v3
  KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid
  KVM: arm/arm64: Reduce verbosity of KVM init log
  KVM: arm/arm64: Reset mapped IRQs on VM reset
  KVM: arm/arm64: Avoid vcpu_load for other vcpu ioctls than KVM_RUN
  KVM: arm/arm64: vgic: Add missing irq_lock to vgic_mmio_read_pending
  KVM: PPC: Book3S HV: Fix trap number return from __kvmppc_vcore_entry
parents 9ef0f88f daaf216c
arch/arm64/kvm/guest.c  +0 −3

@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
-	vcpu_load(vcpu);
-
 	trace_kvm_set_guest_debug(vcpu, dbg->control);
 
 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	}
 
 out:
-	vcpu_put(vcpu);
 	return ret;
 }
 
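The deletions above come from "KVM: arm/arm64: Avoid vcpu_load for other vcpu ioctls than KVM_RUN": the generic vcpu ioctl path now brackets the arch hook with vcpu_load()/vcpu_put() itself, so repeating the pair inside kvm_arch_vcpu_ioctl_set_guest_debug() would load the vCPU twice. A minimal sketch of the resulting call shape, assuming a void-returning vcpu_load() as in this series (the dispatcher name and simplified error handling are stand-ins, not the exact kernel code):

/* Sketch, not the exact kernel code: common code pins the vCPU once
 * around the arch hook, so the hook no longer loads it itself. */
static long vcpu_ioctl_sketch(struct kvm_vcpu *vcpu,
			      struct kvm_guest_debug *dbg)
{
	long ret;

	vcpu_load(vcpu);	/* done once, in common code */
	ret = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, dbg);
	vcpu_put(vcpu);		/* matching put, also in common code */
	return ret;
}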
arch/powerpc/kvm/book3s_hv_rmhandlers.S  +5 −5

@@ -320,7 +320,6 @@ kvm_novcpu_exit:
 	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 secondary_too_late:
 	li	r12, 0
+	stw	r12, STACK_SLOT_TRAP(r1)
 	cmpdi	r4, 0
 	beq	11f
 	stw	r12, VCPU_TRAP(r4)
@@ -1558,12 +1558,12 @@ mc_cont:
 3:	stw	r5,VCPU_SLB_MAX(r9)
 
 guest_bypass:
+	stw	r12, STACK_SLOT_TRAP(r1)
 	mr 	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	lwz	r12, VCPU_TRAP(r9)
 
 	/* Stop others sending VCPU interrupts to this physical CPU */
 	li	r0, -1
@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
 	 * have to coordinate the hardware threads.
+	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
 	 */
 kvmhv_switch_to_host:
 	/* Secondary threads wait for primary to do partition switch */
@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* If HMI, call kvmppc_realmode_hmi_handler() */
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	27f
 	bl	kvmppc_realmode_hmi_handler
 	nop
 	cmpdi	r3, 0
+	li	r12, BOOK3S_INTERRUPT_HMI
 	/*
 	 * At this point kvmppc_realmode_hmi_handler may have resync-ed
 	 * the TB, and if it has, we must not subtract the guest timebase
@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
 	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
 	cmpwi	r8, 0
 	beq	47f
-	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_p9_restore_lpcr
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	48f
 47:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
+	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
 	ld	r0, SFS+PPC_LR_STKOFF(r1)
 	addi	r1, r1, SFS
 	mtlr	r0
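Taken together, these hunks give the trap number a single home, STACK_SLOT_TRAP(r1), for the whole exit path: it is zero-initialized in secondary_too_late, saved in guest_bypass before kvmhv_commence_exit can clobber r12, no longer shuffled in and out around kvmhv_p9_restore_lpcr or the HMI check, and reloaded into r12 exactly once, right before __kvmppc_vcore_entry returns. Previously a helper call could trash r12 and the caller acted on a bogus trap number, which is how IPIs got lost and smp_call_function_many() locked up on POWER9. A C analogy of the save/reload pattern (illustrative only; the real fix is the assembly above, and the trap value below is a made-up stand-in):

#include <stdio.h>

/* Pretend this helper clobbers the register the trap lived in. */
static void helper_that_clobbers(void) { }

static int vcore_exit_sketch(int trap)
{
	int stack_slot_trap = trap;	/* stw r12, STACK_SLOT_TRAP(r1) */

	helper_that_clobbers();		/* bl kvmhv_commence_exit */
	return stack_slot_trap;		/* lwz r12, STACK_SLOT_TRAP(r1) */
}

int main(void)
{
	/* 0x500 is only an example value, not a claim about the real trap */
	printf("trap = 0x%x\n", vcore_exit_sketch(0x500));
	return 0;
}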
arch/x86/kvm/mmu.c  +3 −1

@@ -2770,9 +2770,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
+	if (!kvm_is_mmio_pfn(pfn))
+		spte |= shadow_me_mask;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
-	spte |= shadow_me_mask;
 
 	if (pte_access & ACC_WRITE_MASK) {
 
 		/*
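This is the AMD SME fix from the pull: shadow_me_mask carries the memory-encryption bit, which only makes sense for RAM-backed frames. OR-ing it into the SPTE of a passed-through device's MMIO frame corrupts the physical address the guest mapping targets, breaking device passthrough. The reordered logic reduces to the following standalone sketch (the PAGE_SHIFT value and the boolean standing in for kvm_is_mmio_pfn() are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, the usual x86 value */

/* Sketch of the fixed SPTE composition: apply the encryption mask
 * only to RAM-backed pfns, never to MMIO pfns. */
static uint64_t make_spte_sketch(uint64_t pfn, uint64_t shadow_me_mask,
				 bool pfn_is_mmio)
{
	uint64_t spte = 0;

	if (!pfn_is_mmio)		/* !kvm_is_mmio_pfn(pfn) in the real code */
		spte |= shadow_me_mask;	/* SME bit: encrypt RAM mappings only */

	spte |= pfn << PAGE_SHIFT;	/* then fold in the physical frame */
	return spte;
}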
include/kvm/arm_vgic.h  +1 −0

@@ -360,6 +360,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
 
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 
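The new declaration backs "KVM: arm/arm64: Reset mapped IRQs on VM reset": an IRQ mapped to a hardware interrupt (the arch timer's PPI, for instance) keeps line-level state in the vgic, and that state must be cleared on VM reset, or a level left high by the previous guest run would fire immediately into the fresh one. A hypothetical caller, only to show the intended shape of use (the function name and parameter below are stand-ins, not the actual call site):

/* Hypothetical reset hook: clear the vgic's stale line level for a
 * hardware-mapped IRQ when the vCPU is reset. */
static void timer_reset_sketch(struct kvm_vcpu *vcpu, u32 mapped_intid)
{
	kvm_vgic_reset_mapped_irq(vcpu, mapped_intid);
}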
include/linux/irqchip/arm-gic-v3.h  +1 −0

@@ -503,6 +503,7 @@
 
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
+#define ICH_HCR_NPIE			(1 << 3)
 #define ICH_HCR_TC			(1 << 10)
 #define ICH_HCR_TALL0			(1 << 11)
 #define ICH_HCR_TALL1			(1 << 12)
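ICH_HCR_NPIE enables the GIC's "no pending" maintenance interrupt, which fires once no list register holds a pending interrupt. The multi-source SGI fix relies on it: when more sources of the same vintid are queued than one LR can express, KVM requests this callback and injects the remaining sources then. A sketch of the bit use (the function and the register-image pointer are illustrative stand-ins, not kernel API):

#include <stdint.h>

#define ICH_HCR_NPIE	(1 << 3)

/* Request a maintenance interrupt once no LR is still pending, so the
 * remaining SGI sources can be injected on the next pass. */
static void vgic_request_npie_sketch(uint32_t *ich_hcr_image)
{
	*ich_hcr_image |= ICH_HCR_NPIE;
}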