
Commit 2d62e076 authored by Linus Torvalds

Merge tag 'kvm-4.11-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more KVM updates from Radim Krčmář:
 "Second batch of KVM changes for the 4.11 merge window:

  PPC:
   - correct assumption about ASDR on POWER9
   - fix MMIO emulation on POWER9

  x86:
   - add a simple test for ioperm (see the sketch below)
   - cleanup TSS (going through KVM tree as the whole undertaking was
     caused by VMX's use of TSS)
   - fix nVMX interrupt delivery
   - fix some performance counters in the guest

  ... and two cleanup patches"

* tag 'kvm-4.11-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: Fix pending events injection
  x86/kvm/vmx: remove unused variable in segment_base()
  selftests/x86: Add a basic selftest for ioperm
  x86/asm: Tidy up TSS limit code
  kvm: convert kvm.users_count from atomic_t to refcount_t
  KVM: x86: never specify a sample period for virtualized in_tx_cp counters
  KVM: PPC: Book3S HV: Don't use ASDR for real-mode HPT faults on POWER9
  KVM: PPC: Book3S HV: Fix software walk of guest process page tables
parents be834aaf 16ce771b
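
For the ioperm item in the message above: the merged selftest lives under the x86 selftests, and the following is only a rough sketch of the same idea (not the merged test), using the standard glibc ioperm()/outb() interfaces. It acquires access to one port, uses it, drops the permission, and expects the next access to fault:

#include <stdio.h>
#include <signal.h>
#include <setjmp.h>
#include <sys/io.h>

static sigjmp_buf env;

static void on_sigsegv(int sig)
{
	siglongjmp(env, 1);		/* the second outb() is expected to fault */
}

int main(void)
{
	if (ioperm(0x80, 1, 1) != 0) {	/* needs CAP_SYS_RAWIO */
		perror("ioperm");
		return 1;
	}
	outb(0, 0x80);			/* allowed: bit set in the IO bitmap */

	if (ioperm(0x80, 1, 0) != 0) {	/* drop the permission again */
		perror("ioperm");
		return 1;
	}
	signal(SIGSEGV, on_sigsegv);
	if (sigsetjmp(env, 1) == 0) {
		outb(0, 0x80);		/* must now raise #GP -> SIGSEGV */
		fprintf(stderr, "FAIL: access after drop succeeded\n");
		return 1;
	}
	printf("OK\n");
	return 0;
}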
+2 −1
@@ -46,7 +46,7 @@ extern struct patb_entry *partition_tb;
 
 /* Bits in patb0 field */
 #define PATB_HR		(1UL << 63)
-#define RPDB_MASK	0x0ffffffffffff00fUL
+#define RPDB_MASK	0x0fffffffffffff00UL
 #define RPDB_SHIFT	(1UL << 8)
 #define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
 #define RTS1_MASK	(3UL << RTS1_SHIFT)
@@ -57,6 +57,7 @@ extern struct patb_entry *partition_tb;
 /* Bits in patb1 field */
 #define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
 #define PRTS_MASK	0x1f		/* process table size field */
+#define PRTB_MASK	0x0ffffffffffff000UL
 
 /*
  * Limit process table to PAGE_SIZE table. This
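
The old RPDB_MASK kept bits 0-3 and dropped bits 8-11, so it could not isolate the radix page-directory base field that starts at bit 8; the corrected mask selects one contiguous field. A tiny standalone check (the patb0 value below is made up purely for illustration) shows what each mask extracts:

#include <stdio.h>

#define PATB_HR		(1UL << 63)
#define RPDB_MASK_OLD	0x0ffffffffffff00fUL	/* bits 0-3 and 12-59: wrong */
#define RPDB_MASK_NEW	0x0fffffffffffff00UL	/* bits 8-59: the base field */

int main(void)
{
	/* Made-up patb0: HR set, directory base 0x1f000100, low bits 0x9. */
	unsigned long patb0 = PATB_HR | 0x1f000100UL | 0x9UL;

	printf("old mask: %#lx\n", patb0 & RPDB_MASK_OLD);	/* 0x1f000009: keeps 0x9, loses 0x100 */
	printf("new mask: %#lx\n", patb0 & RPDB_MASK_NEW);	/* 0x1f000100 */
	return 0;
}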
+3 −2
@@ -32,6 +32,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u32 pid;
 	int ret, level, ps;
 	__be64 prte, rpte;
+	unsigned long ptbl;
 	unsigned long root, pte, index;
 	unsigned long rts, bits, offset;
 	unsigned long gpa;
@@ -53,8 +54,8 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return -EINVAL;
 
 	/* Read partition table to find root of tree for effective PID */
-	ret = kvm_read_guest(kvm, kvm->arch.process_table + pid * 16,
-			     &prte, sizeof(prte));
+	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
+	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
 	if (ret)
 		return ret;
 
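The point of the new ptbl line: kvm->arch.process_table carries the table size in its low bits (the PRTS field), so the raw value is not a usable base address and must be masked before indexing; each process-table entry is 16 bytes. A worked example with a hypothetical register value:

#include <stdio.h>

#define PRTB_MASK	0x0ffffffffffff000UL

int main(void)
{
	unsigned long process_table = 0x000000001f00000cUL;	/* base 0x1f000000, size 0xc */
	unsigned int pid = 5;

	unsigned long broken = process_table + pid * 16;		/* old code */
	unsigned long fixed  = (process_table & PRTB_MASK) + pid * 16;	/* as patched */

	printf("unmasked entry address: %#lx\n", broken);	/* 0x1f00005c: off by 0xc */
	printf("masked entry address:   %#lx\n", fixed);	/* 0x1f000050 */
	return 0;
}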
+4 −4
@@ -1787,12 +1787,12 @@ kvmppc_hdsi:
 	/* HPTE not found fault or protection fault? */
 	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
 	beq	1f			/* if not, send it to the guest */
+	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
+	beq	3f
 BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
 	b	4f
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
-	beq	3f
 	clrrdi	r0, r4, 28
 	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
@@ -1879,12 +1879,12 @@ kvmppc_hisi:
 	bne	.Lradix_hisi		/* for radix, just save ASDR */
 	andis.	r0, r11, SRR1_ISI_NOPT@h
 	beq	1f
+	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
+	beq	3f
 BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
 	b	4f
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
-	beq	3f
 	clrrdi	r0, r10, 28
 	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
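
In both hunks the fix is pure reordering: the MSR_DR/MSR_IR test now runs before the POWER9 feature section, so real-mode faults skip the ASDR read. Rendered as a C-like sketch (the helper names below are stand-ins for the asm, not kernel APIs):

#include <stdbool.h>

#define MSR_DR	(1UL << 4)	/* data relocation enable (illustrative) */

extern bool cpu_is_power9(void);		/* stand-in for the FTR section */
extern unsigned long handle_real_mode(unsigned long dar);	/* label "3:" */
extern unsigned long read_asdr(void);				/* mfspr SPRN_ASDR */
extern unsigned long slb_lookup(unsigned long esid);		/* PPC_SLBFEE_DOT */

static unsigned long hdsi_vsid(unsigned long msr, unsigned long dar)
{
	if (!(msr & MSR_DR))			/* checked FIRST after the fix */
		return handle_real_mode(dar);
	if (cpu_is_power9())
		return read_asdr();		/* ASDR valid: relocation is on */
	return slb_lookup(dar & ~((1UL << 28) - 1));	/* clrrdi ..., 28 */
}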
+11 −7
@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
 	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
 }
 
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
 static inline void force_reload_TR(void)
 {
 	struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
 	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
 	load_TR_desc();
+	this_cpu_write(__tss_limit_invalid, false);
 }
 
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
 {
 	DEBUG_LOCKS_WARN_ON(preemptible());
 
-	if (unlikely(this_cpu_read(need_tr_refresh))) {
+	if (unlikely(this_cpu_read(__tss_limit_invalid)))
 		force_reload_TR();
-		this_cpu_write(need_tr_refresh, false);
-	}
 }
 
 /*
@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
 	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
 		force_reload_TR();
 	else
-		this_cpu_write(need_tr_refresh, true);
+		this_cpu_write(__tss_limit_invalid, true);
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)
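
The renamed pair is a lazy invalidate/refresh pattern around a per-cpu flag: invalidate_tss_limit() either reloads TR immediately (when the current task actually uses an IO bitmap) or just marks the limit stale, and refresh_tss_limit() reloads only when the flag is set. A pair of hypothetical callers sketching the intended usage (in this merge the real callers are KVM's VMX code and sys_ioperm() below; both helpers assert !preemptible()):

#include <linux/preempt.h>
#include <asm/desc.h>		/* invalidate_tss_limit(), refresh_tss_limit() */

static void after_tss_limit_clobbered(void)	/* e.g. after a VM exit */
{
	preempt_disable();
	invalidate_tss_limit();	/* cheap: usually just sets the per-cpu flag */
	preempt_enable();
}

static void before_io_bitmap_matters(void)	/* e.g. when enabling port access */
{
	preempt_disable();
	refresh_tss_limit();	/* reloads TR only if the flag says stale */
	preempt_enable();
}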
+7 −1
@@ -48,8 +48,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 		t->io_bitmap_ptr = bitmap;
 		set_thread_flag(TIF_IO_BITMAP);
 
+		/*
+		 * Now that we have an IO bitmap, we need our TSS limit to be
+		 * correct.  It's fine if we are preempted after doing this:
+		 * with TIF_IO_BITMAP set, context switches will keep our TSS
+		 * limit correct.
+		 */
 		preempt_disable();
-		refresh_TR();
+		refresh_tss_limit();
 		preempt_enable();
 	}
 
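The comment's claim that context switches keep the TSS limit correct rests on the switch path honoring TIF_IO_BITMAP. Roughly, the switch side looks like the simplified sketch below (copy_io_bitmap() is a hypothetical stand-in for the real bitmap copy, not a kernel function):

#include <linux/sched.h>
#include <asm/thread_info.h>

extern void refresh_tss_limit(void);
extern void copy_io_bitmap(struct task_struct *next);	/* hypothetical helper */

static void switch_to_extra_sketch(struct task_struct *next)
{
	/* Runs with preemption off during the context switch. */
	if (test_tsk_thread_flag(next, TIF_IO_BITMAP)) {
		copy_io_bitmap(next);	/* install next's IO bitmap in the TSS */
		refresh_tss_limit();	/* make sure the limit covers the bitmap */
	}
}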