
Commit 406732c9 authored by Linus Torvalds
Pull KVM fixes from Paolo Bonzini:

 - fix for module unload vs deferred jump labels (note: there might be
   other buggy modules!)

 - two NULL pointer dereferences from syzkaller

 - also from syzkaller: fix emulation of fxsave/fxrstor/sgdt/sidt; the
   problem was made worse during this merge window, but on released
   kernels it is "just" a kernel memory leak

 - fix emulation of "mov ss" - somewhat serious on AMD, less so on Intel

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: fix emulation of "MOV SS, null selector"
  KVM: x86: fix NULL deref in vcpu_scan_ioapic
  KVM: eventfd: fix NULL deref irqbypass consumer
  KVM: x86: Introduce segmented_write_std
  KVM: x86: flush pending lapic jump label updates on module unload
  jump_labels: API for flushing deferred jump label updates
parents a65c9259 33ab9110
arch/x86/kvm/emulate.c: +56 −14

@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, int seg, u8 cpl,
 				     enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	rpl = selector & 3;
 
-	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
-	if ((seg == VCPU_SREG_CS
-	     || (seg == VCPU_SREG_SS
-		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-	     || seg == VCPU_SREG_TR)
-	    && null_selector)
-		goto exception;
-
 	/* TR should be in GDT only */
 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
 		goto exception;
 
-	if (null_selector) /* for NULL selector skip all following checks */
+	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
+	if (null_selector) {
+		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+			goto exception;
+
+		if (seg == VCPU_SREG_SS) {
+			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+				goto exception;
+
+			/*
+			 * ctxt->ops->set_segment expects the CPL to be in
+			 * SS.DPL, so fake an expand-up 32-bit data segment.
+			 */
+			seg_desc.type = 3;
+			seg_desc.p = 1;
+			seg_desc.s = 1;
+			seg_desc.dpl = cpl;
+			seg_desc.d = 1;
+			seg_desc.g = 1;
+		}
+
+		/* Skip all following checks */
 		goto load;
+	}
 
 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
+
+	/*
+	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+	 * they can load it at CPL<3 (Intel's manual says only LSS can,
+	 * but it's wrong).
+	 *
+	 * However, the Intel manual says that putting IST=1/DPL=3 in
+	 * an interrupt gate will result in SS=3 (the AMD manual instead
+	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
+	 * and only forbid it here.
+	 */
+	if (seg == VCPU_SREG_SS && selector == 3 &&
+	    ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
 					 X86_TRANSFER_NONE, NULL);
 }
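To make the new semantics concrete, here is a hypothetical guest-side snippet (not part of the patch) of the operation being emulated. Per the two hunks above, a NULL SS load with RPL equal to CPL is accepted in 64-bit mode at CPL<3, and the emulator then fakes an expand-up data segment with SS.DPL = CPL; MOV SS with selector 3 in 64-bit mode is instead rejected with #GP(0).

/* Hypothetical 64-bit guest code running at CPL 0; illustrative only. */
static inline void load_null_ss(void)
{
	unsigned short sel = 0;		/* NULL selector, RPL 0 == CPL 0 */

	/* Legal at CPL < 3 in long mode; selector 3 here would #GP(0). */
	asm volatile("mov %0, %%ss" : : "r"(sel) : "memory");
}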
@@ -3685,7 +3727,7 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
 	}
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
 				   &desc_ptr, 2 + ctxt->op_bytes);
 }
 
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	else
 		size = offsetof(struct fxregs_state, xmm_space[0]);
 
-	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
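The three call-site changes above move the SGDT/SIDT store and the FXSAVE/FXRSTOR accesses from the emulated-access helpers (segmented_write/segmented_read) to the *_std variants, which go straight through ctxt->ops->write_std/read_std at the linearized address instead of the full emulated memory path; per the commit message, using the emulated path here amounted to a kernel memory leak. For contrast, the pre-existing read-side twin of the new helper (only its final line appears in the first hunk's context) presumably looks like this, with write = false in the linearize() call:

/* Reconstructed for illustration; treat as a sketch, not the hunk itself. */
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}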
arch/x86/kvm/lapic.c: +6 −0

@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
 	jump_label_rate_limit(&apic_hw_disabled, HZ);
 	jump_label_rate_limit(&apic_sw_disabled, HZ);
 }
+
+void kvm_lapic_exit(void)
+{
+	static_key_deferred_flush(&apic_hw_disabled);
+	static_key_deferred_flush(&apic_sw_disabled);
+}
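The underlying bug: apic_hw_disabled and apic_sw_disabled are rate-limited deferred static keys, so static_key_slow_dec_deferred() pushes the actual update out to delayed work; if kvm.ko is unloaded before that work runs, the work item fires on freed module memory. A minimal sketch of the pattern (hypothetical example module, not from the patch):

#include <linux/init.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/module.h>

/* Hypothetical module showing the unload pattern this series fixes. */
static struct static_key_deferred example_key;

static int __init example_init(void)
{
	/* Batch slow_dec updates: defer them by roughly one second. */
	jump_label_rate_limit(&example_key, HZ);
	return 0;
}

static void __exit example_exit(void)
{
	/*
	 * Wait for any update still pending from
	 * static_key_slow_dec_deferred(); without this, the delayed
	 * work could run after the module text is freed.
	 */
	static_key_deferred_flush(&example_key);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");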
arch/x86/kvm/lapic.h: +1 −0

@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
arch/x86/kvm/x86.c: +3 −0

@@ -3342,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
 	switch (cap->cap) {
 	case KVM_CAP_HYPERV_SYNIC:
+		if (!irqchip_in_kernel(vcpu->kvm))
+			return -EINVAL;
 		return kvm_hv_activate_synic(vcpu);
 	default:
 		return -EINVAL;
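The two added lines close one of the syzkaller-reported NULL dereferences: activating SynIC on a VM with no in-kernel irqchip later led vcpu_scan_ioapic() to dereference a NULL ioapic. A userspace sketch of the now-rejected sequence (hypothetical reproducer, error handling omitted):

#include <fcntl.h>
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int try_synic_without_irqchip(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	/* Deliberately no KVM_CREATE_IRQCHIP before creating the vcpu. */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_HYPERV_SYNIC;
	/* With the fix this fails with EINVAL instead of arming a later
	 * NULL dereference in vcpu_scan_ioapic(). */
	return ioctl(vcpu, KVM_ENABLE_CAP, &cap);
}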
@@ -6045,6 +6047,7 @@ int kvm_arch_init(void *opaque)
 
 void kvm_arch_exit(void)
 {
+	kvm_lapic_exit();
 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
include/linux/jump_label_ratelimit.h: +5 −0

@@ -14,6 +14,7 @@ struct static_key_deferred {
 
 #ifdef HAVE_JUMP_LABEL
 extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 	STATIC_KEY_CHECK_USE();
 	static_key_slow_dec(&key->key);
 }
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+}
+
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
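The header only declares the new API (and stubs it out when jump labels are compiled out). The HAVE_JUMP_LABEL implementation lives in kernel/jump_label.c in the companion patch ("jump_labels: API for flushing deferred jump label updates" in the shortlog) and plausibly amounts to flushing the key's pending delayed work:

/* Sketch of the out-of-line version from kernel/jump_label.c (not part
 * of the hunks shown here); reconstructed, so treat it as illustrative.
 */
void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);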