Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ba1a96fc authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'x86-seccomp-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 seccomp changes from Ingo Molnar:
 "This tree includes x86 seccomp filter speedups and related preparatory
  work, which touches core seccomp facilities as well.

  The main idea is to split seccomp into two phases, to be able to enter
  a simple fast path for syscalls without ptrace side effects.

  There's no substantial user-visible (and ABI) effects expected from
  this, except a change in how we emit a better audit record for
  SECCOMP_RET_TRACE events"

* 'x86-seccomp-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86_64, entry: Use split-phase syscall_trace_enter for 64-bit syscalls
  x86_64, entry: Treat regs->ax the same in fastpath and slowpath syscalls
  x86: Split syscall_trace_enter into two phases
  x86, entry: Only call user_exit if TIF_NOHZ
  x86, x32, audit: Fix x32's AUDIT_ARCH wrt audit
  seccomp: Document two-phase seccomp and arch-provided seccomp_data
  seccomp: Allow arch code to provide seccomp_data
  seccomp: Refactor the filter callback and the API
  seccomp,x86,arm,mips,s390: Remove nr parameter from secure_computing
parents f1bfbd98 1dcf74f6
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -323,6 +323,17 @@ config HAVE_ARCH_SECCOMP_FILTER
	    results in the system call being skipped immediately.
	  - seccomp syscall wired up

	  For best performance, an arch should use seccomp_phase1 and
	  seccomp_phase2 directly.  It should call seccomp_phase1 for all
	  syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
	  need to be called from a ptrace-safe context.  It must then
	  call seccomp_phase2 if seccomp_phase1 returns anything other
	  than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.

	  As an additional optimization, an arch may provide seccomp_data
	  directly to seccomp_phase1; this avoids multiple calls
	  to the syscall_xyz helpers for every syscall.

config SECCOMP_FILTER
	def_bool y
	depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
+6 −1
Original line number Diff line number Diff line
@@ -933,8 +933,13 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
	current_thread_info()->syscall = scno;

	/* Do the secure computing check first; failures should be fast. */
	if (secure_computing(scno) == -1)
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing() == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(scno);
#endif

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+1 −1
Original line number Diff line number Diff line
@@ -770,7 +770,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
	long ret = 0;
	user_exit();

	if (secure_computing(syscall) == -1)
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+1 −1
Original line number Diff line number Diff line
@@ -803,7 +803,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
	if (secure_computing()) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
+5 −1
Original line number Diff line number Diff line
@@ -85,7 +85,7 @@ For 32-bit we have the following conventions - kernel is built with
#define ARGOFFSET	R11
#define SWFRAME		ORIG_RAX

	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
	subq  $9*8+\addskip, %rsp
	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
	movq_cfi rdi, 8*8
@@ -96,7 +96,11 @@ For 32-bit we have the following conventions - kernel is built with
	movq_cfi rcx, 5*8
	.endif

	.if \rax_enosys
	movq $-ENOSYS, 4*8(%rsp)
	.else
	movq_cfi rax, 4*8
	.endif

	.if \save_r891011
	movq_cfi r8,  3*8
Loading