Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3370b69e authored by Linus Torvalds
Browse files
Pull second batch of kvm updates from Paolo Bonzini:
 "Four changes:

   - x86: work around two nasty cases where a benign exception occurs
     while another is being delivered.  The endless stream of exceptions
     causes an infinite loop in the processor, which not even NMIs or
     SMIs can interrupt; in the virt case, there is no possibility to
     exit to the host either.

   - x86: support for Skylake per-guest TSC rate.  Long supported by
     AMD, the patches mostly move things from there to common
     arch/x86/kvm/ code.

   - generic: remove local_irq_save/restore from the guest entry and
     exit paths when context tracking is enabled.  The patches are a few
     months old, but we discussed them again at kernel summit.  Andy
     will pick up from here and, in 4.5, try to remove it from the user
     entry/exit paths.

   - PPC: Two bug fixes, see merge commit 37028975 for details"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (21 commits)
  KVM: x86: rename update_db_bp_intercept to update_bp_intercept
  KVM: svm: unconditionally intercept #DB
  KVM: x86: work around infinite loop in microcode when #AC is delivered
  context_tracking: avoid irq_save/irq_restore on guest entry and exit
  context_tracking: remove duplicate enabled check
  KVM: VMX: Dump TSC multiplier in dump_vmcs()
  KVM: VMX: Use a scaled host TSC for guest readings of MSR_IA32_TSC
  KVM: VMX: Setup TSC scaling ratio when a vcpu is loaded
  KVM: VMX: Enable and initialize VMX TSC scaling
  KVM: x86: Use the correct vcpu's TSC rate to compute time scale
  KVM: x86: Move TSC scaling logic out of call-back read_l1_tsc()
  KVM: x86: Move TSC scaling logic out of call-back adjust_tsc_offset()
  KVM: x86: Replace call-back compute_tsc_offset() with a common function
  KVM: x86: Replace call-back set_tsc_khz() with a common function
  KVM: x86: Add a common TSC scaling function
  KVM: x86: Add a common TSC scaling ratio field in kvm_vcpu_arch
  KVM: x86: Collect information for setting TSC scaling ratio
  KVM: x86: declare a few variables as __read_mostly
  KVM: x86: merge handle_mmio_page_fault and handle_mmio_page_fault_common
  KVM: PPC: Book3S HV: Don't dynamically split core when already split
  ...
parents be23c9d2 37028975
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -2019,7 +2019,7 @@ static bool can_split_piggybacked_subcores(struct core_info *cip)
			return false;
			return false;
		n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
		n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
	}
	}
	if (n_subcores > 3 || large_sub < 0)
	if (large_sub < 0 || !subcore_config_ok(n_subcores + 1, 2))
		return false;
		return false;


	/*
	/*
+12 −8
Original line number Original line Diff line number Diff line
@@ -1749,7 +1749,8 @@ kvmppc_hdsi:
	beq	3f
	beq	3f
	clrrdi	r0, r4, 28
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)


@@ -1768,14 +1769,15 @@ kvmppc_hdsi:
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f
	beq	2f


	/* Synthesize a DSI for the guest */
	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
6:	ld	r7, VCPU_CTR(r9)
@@ -1823,7 +1825,8 @@ kvmppc_hisi:
	beq	3f
	beq	3f
	clrrdi	r0, r10, 28
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
4:
	/* Search the hash table. */
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r3, r9			/* vcpu pointer */
@@ -1840,11 +1843,12 @@ kvmppc_hisi:
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	beq	guest_exit_cont


	/* Synthesize an ISI for the guest */
	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return
	b	fast_interrupt_c_return


+10 −17
Original line number Original line Diff line number Diff line
@@ -505,6 +505,7 @@ struct kvm_vcpu_arch {
	u32 virtual_tsc_mult;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	s64 ia32_tsc_adjust_msr;
	u64 tsc_scaling_ratio;


	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -777,7 +778,7 @@ struct kvm_x86_ops {
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);


	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -844,7 +845,7 @@ struct kvm_x86_ops {
	int (*get_lpage_level)(void);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);
	bool (*invpcid_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);


	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);


@@ -852,11 +853,9 @@ struct kvm_x86_ops {


	bool (*has_wbinvd_exit)(void);
	bool (*has_wbinvd_exit)(void);


	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);


	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);


	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
@@ -923,17 +922,6 @@ struct kvm_arch_async_pf {


extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kvm_x86_ops *kvm_x86_ops;


static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
void kvm_mmu_module_exit(void);


@@ -986,10 +974,12 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);


/* control of guest tsc rate supported? */
/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32  kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;


enum emulation_result {
enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DONE,         /* no further processing */
@@ -1235,6 +1225,9 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);


u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);


+3 −0
Original line number Original line Diff line number Diff line
@@ -73,6 +73,7 @@
#define SECONDARY_EXEC_ENABLE_PML               0x00020000
#define SECONDARY_EXEC_ENABLE_PML               0x00020000
#define SECONDARY_EXEC_XSAVES			0x00100000
#define SECONDARY_EXEC_XSAVES			0x00100000
#define SECONDARY_EXEC_PCOMMIT			0x00200000
#define SECONDARY_EXEC_PCOMMIT			0x00200000
#define SECONDARY_EXEC_TSC_SCALING              0x02000000


#define PIN_BASED_EXT_INTR_MASK                 0x00000001
#define PIN_BASED_EXT_INTR_MASK                 0x00000001
#define PIN_BASED_NMI_EXITING                   0x00000008
#define PIN_BASED_NMI_EXITING                   0x00000008
@@ -167,6 +168,8 @@ enum vmcs_field {
	VMWRITE_BITMAP                  = 0x00002028,
	VMWRITE_BITMAP                  = 0x00002028,
	XSS_EXIT_BITMAP                 = 0x0000202C,
	XSS_EXIT_BITMAP                 = 0x0000202C,
	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
	XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
	TSC_MULTIPLIER                  = 0x00002032,
	TSC_MULTIPLIER_HIGH             = 0x00002033,
	GUEST_PHYSICAL_ADDRESS          = 0x00002400,
	GUEST_PHYSICAL_ADDRESS          = 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
	GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
	VMCS_LINK_POINTER               = 0x00002800,
	VMCS_LINK_POINTER               = 0x00002800,
+1 −0
Original line number Original line Diff line number Diff line
@@ -100,6 +100,7 @@
	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
	{ SVM_EXIT_EXCP_BASE + AC_VECTOR,       "AC excp" }, \
	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
	{ SVM_EXIT_INTR,        "interrupt" }, \
	{ SVM_EXIT_INTR,        "interrupt" }, \
	{ SVM_EXIT_NMI,         "nmi" }, \
	{ SVM_EXIT_NMI,         "nmi" }, \
Loading