Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 161aa772 authored by Linus Torvalds
Browse files

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
 "A collection of small fixes:

   - There still seem to be problems with asm goto which requires the
     empty asm hack.
   - If SMAP is disabled at compile time, don't enable it nor try to
     interpret a page fault as an SMAP violation.
   - Fix a case of unbounded recursion while tracing"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smap: smap_violation() is bogus if CONFIG_X86_SMAP is off
  x86, smap: Don't enable SMAP if CONFIG_X86_SMAP is disabled
  compiler/gcc4: Make quirk for asm_volatile_goto() unconditional
  x86: Use preempt_disable_notrace() in cycles_2_ns()
parents eef445ee 4640c7ee
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
	raw_local_save_flags(eflags);
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP))
	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		set_in_cr4(X86_CR4_SMAP);
#else
		clear_in_cr4(X86_CR4_SMAP);
#endif
	}
}

/*
+2 −2
Original line number Diff line number Diff line
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
	 * dance when its actually needed.
	 */

	preempt_disable();
	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable();
	preempt_enable_notrace();

	return ns;
}
+9 −5
Original line number Diff line number Diff line
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & PF_USER)
		return false;

@@ -1087,12 +1093,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (static_cpu_has(X86_FEATURE_SMAP)) {
	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}
	}

	/*
	 * If we're in an interrupt, have no user context or are running
+1 −5
Original line number Diff line number Diff line
@@ -75,11 +75,7 @@
 *
 * (asm goto is automatically volatile - the naming reflects this.)
 */
#if GCC_VERSION <= 40801
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
#else
# define asm_volatile_goto(x...)	do { asm goto(x); } while (0)
#endif

#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400