
Commit 583dbad3 authored by Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core fixes from Thomas Gleixner:

 - Unbreak the BPF compilation which got broken by the unconditional
   requirement of asm-goto, which is not supported by clang.

 - Prevent probing on exception masking instructions in uprobes and
   kprobes to avoid the issues of the delayed exceptions instead of
   having an ugly workaround.

 - Prevent a double free_page() in the error path of do_kexec_load()

 - A set of objtool updates addressing various issues mostly related to
   switch tables and the noreturn detection for recursive sibling calls

 - Header sync for tools.

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Detect RIP-relative switch table references, part 2
  objtool: Detect RIP-relative switch table references
  objtool: Support GCC 8 switch tables
  objtool: Support GCC 8's cold subfunctions
  objtool: Fix "noreturn" detection for recursive sibling calls
  objtool, kprobes/x86: Sync the latest <asm/insn.h> header with tools/objtool/arch/x86/include/asm/insn.h
  x86/cpufeature: Guard asm_volatile_goto usage for BPF compilation
  uprobes/x86: Prohibit probing on MOV SS instruction
  kprobes/x86: Prohibit probing on exception masking instructions
  x86/kexec: Avoid double free_page() upon do_kexec_load() failure
parents 203ec2fe 7dec80cc
+15 −0
@@ -140,6 +140,20 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
 
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 
+#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
+
+/*
+ * Workaround for the sake of BPF compilation which utilizes kernel
+ * headers, but clang does not support ASM GOTO and fails the build.
+ */
+#ifndef __BPF_TRACING__
+#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
+#endif
+
+#define static_cpu_has(bit)            boot_cpu_has(bit)
+
+#else
+
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
  * These will statically patch the target code for additional
@@ -195,6 +209,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit)
 		boot_cpu_has(bit) :				\
 		_static_cpu_has(bit)				\
 )
+#endif
 
 #define cpu_has_bug(c, bit)		cpu_has(c, (bit))
 #define set_cpu_bug(c, bit)		set_cpu_cap(c, (bit))
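The #warning in the hunk above tells BPF users how to silence the build break. A minimal consumer-side sketch, not part of this commit: a clang-built BPF tracing program can define __BPF_TRACING__ itself, which is equivalent to passing -D __BPF_TRACING__ on the command line, as long as the define is visible before any kernel header is included, so the guarded block selects the boot_cpu_has() fallback instead of the asm-goto fast path that clang could not compile at the time.

/* Sketch only: define the opt-out before pulling in any kernel header. */
#define __BPF_TRACING__
#include <linux/types.h>	/* any chain of kernel headers may follow */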
+18 −0
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
 }
 
+#define POP_SS_OPCODE 0x1f
+#define MOV_SREG_OPCODE 0x8e
+
+/*
+ * Intel SDM Vol.3A 6.8.3 states;
+ * "Any single-step trap that would be delivered following the MOV to SS
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
+ * suppressed."
+ * This function returns true if @insn is MOV SS or POP SS. On these
+ * instructions, single stepping is suppressed.
+ */
+static inline int insn_masking_exception(struct insn *insn)
+{
+	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
+		(insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
+		 X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
+}
+
 #endif /* _ASM_X86_INSN_H */
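The helper added to <asm/insn.h> keys off two encodings: POP SS is the single opcode byte 0x1f, and MOV to a segment register is opcode 0x8e with the ModRM reg field (bits 5:3) selecting the destination, where SS is segment register number 2. A standalone userspace sketch of the same test, operating on raw bytes instead of the kernel's struct insn (names here are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POP_SS_OPCODE	0x1f
#define MOV_SREG_OPCODE	0x8e
#define MODRM_REG(m)	(((m) & 0x38) >> 3)	/* bits 5:3 of the ModRM byte */

/* Mirrors insn_masking_exception(): true for POP SS and MOV to SS. */
static bool masks_exceptions(const uint8_t *insn)
{
	return insn[0] == POP_SS_OPCODE ||
	       (insn[0] == MOV_SREG_OPCODE && MODRM_REG(insn[1]) == 2);
}

int main(void)
{
	const uint8_t pop_ss[]    = { 0x1f };		/* pop ss (32-bit mode) */
	const uint8_t mov_ss_ax[] = { 0x8e, 0xd0 };	/* mov ss, ax           */
	const uint8_t mov_ds_ax[] = { 0x8e, 0xd8 };	/* mov ds, ax           */

	printf("pop ss     -> %d\n", masks_exceptions(pop_ss));	/* 1 */
	printf("mov ss, ax -> %d\n", masks_exceptions(mov_ss_ax));	/* 1 */
	printf("mov ds, ax -> %d\n", masks_exceptions(mov_ds_ax));	/* 0 */
	return 0;
}

Because the CPU suppresses the single-step trap after these instructions, probing them would leave the debug exception delayed, which is why the kprobes and uprobes changes below simply refuse to probe them.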
+4 −0
@@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
 
+	/* We should not singlestep on the exception masking instructions */
+	if (insn_masking_exception(insn))
+		return 0;
+
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
 	if (insn_rip_relative(insn)) {
+5 −1
@@ -57,12 +57,17 @@ static void load_segments(void)
 static void machine_kexec_free_page_tables(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.pgd);
+	image->arch.pgd = NULL;
 #ifdef CONFIG_X86_PAE
 	free_page((unsigned long)image->arch.pmd0);
+	image->arch.pmd0 = NULL;
 	free_page((unsigned long)image->arch.pmd1);
+	image->arch.pmd1 = NULL;
 #endif
 	free_page((unsigned long)image->arch.pte0);
+	image->arch.pte0 = NULL;
 	free_page((unsigned long)image->arch.pte1);
+	image->arch.pte1 = NULL;
 }
 
 static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
 	    !image->arch.pmd0 || !image->arch.pmd1 ||
 #endif
 	    !image->arch.pte0 || !image->arch.pte1) {
-		machine_kexec_free_page_tables(image);
 		return -ENOMEM;
 	}
 	return 0;
+4 −1
@@ -39,9 +39,13 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
 static void free_transition_pgtable(struct kimage *image)
 {
 	free_page((unsigned long)image->arch.p4d);
+	image->arch.p4d = NULL;
 	free_page((unsigned long)image->arch.pud);
+	image->arch.pud = NULL;
 	free_page((unsigned long)image->arch.pmd);
+	image->arch.pmd = NULL;
 	free_page((unsigned long)image->arch.pte);
+	image->arch.pte = NULL;
 }
 
 static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -91,7 +95,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
 	return 0;
 err:
-	free_transition_pgtable(image);
 	return result;
 }
 