Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e22438f8 authored by Andy Lutomirski, committed by Ingo Molnar
Browse files

x86, selftests: Add a test for the "sysret_ss_attrs" bug



On AMD CPUs, SYSRET can return with a valid SS descriptor with
the hidden attributes set to an unusable state.  Make sure
the kernel doesn't let this happen.  This detects an
as-yet-unfixed regression.

Note that the 64-bit version of this test fails on AMD CPUs on
all kernel versions, although the issue in the 64-bit case is
much less severe than in the 32-bit case.

Reported-by: Brian Gerst <brgerst@gmail.com>
Tested-by: Denys Vlasenko <dvlasenk@redhat.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Tests: e7d6eefa ("x86/vdso32/syscall.S: Do not load __USER32_DS to %ss")
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/resend_4d740841bac383742949e2fefb03982736595087.git.luto@kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7ae383be
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
.PHONY: all all_32 all_64 check_build32 clean run_tests

# List of tests built in both 32-bit and 64-bit variants.
# (The scraped diff left both the old and new versions of this assignment
# in place; only the updated one — including sysret_ss_attrs — is kept,
# since a preceding duplicate assignment would be dead and misleading.)
TARGETS_C_BOTHBITS := sigreturn single_step_syscall sysret_ss_attrs

BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32)
BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
@@ -46,3 +46,6 @@ check_build32:
	  echo "  yum install glibc-devel.*i686";			\
	  exit 1;							\
	fi

# Some tests have additional dependencies.
# sysret_ss_attrs_64 needs the 32<->64-bit mode-switch helpers in thunks.S.
sysret_ss_attrs_64: thunks.S
+2 −0
Original line number Diff line number Diff line
@@ -4,10 +4,12 @@
# script here.
./sigreturn_32 || exit 1
./single_step_syscall_32 || exit 1
./sysret_ss_attrs_32 || exit 1

# Run the 64-bit variants only on a 64-bit machine.
# Fix: the original condition was [[ "$uname -p" -eq "x86_64" ]], which
# expands the unset variable $uname (it is not a command substitution)
# and uses the arithmetic operator -eq on strings, so it never performed
# the intended comparison. Use uname -m with a string comparison instead.
if [[ "$(uname -m)" == "x86_64" ]]; then
    ./sigreturn_64 || exit 1
    ./single_step_syscall_64 || exit 1
    ./sysret_ss_attrs_64 || exit 1
fi

exit 0
+112 −0
Original line number Diff line number Diff line
/*
 * sysret_ss_attrs.c - test that syscalls return valid hidden SS attributes
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * On AMD CPUs, SYSRET can return with a valid SS descriptor with
 * the hidden attributes set to an unusable state.  Make sure the kernel
 * doesn't let this happen.
 */

#define _GNU_SOURCE

#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <pthread.h>

static void *threadproc(void *ctx)
{
	(void)ctx;	/* unused; required by the pthread start-routine signature */

	/*
	 * Spin forever.  Keeping this CPU busy makes sleeps elsewhere on
	 * the same CPU repeatedly enter and leave the kernel, maximizing
	 * the chance of returning to user mode with the bad SS state this
	 * test is hunting for.
	 */
	for (;;)
		;

	/* Unreachable, but keeps the signature's contract explicit. */
	return NULL;
}

#ifdef __x86_64__
/*
 * Defined in thunks.S: switches to 32-bit compatibility mode on the
 * supplied stack, calls @function there, and returns to 64-bit mode.
 */
extern unsigned long call32_from_64(void *stack, void (*function)(void));

/*
 * test_ss: a tiny routine assembled as 32-bit code (.code32).  It pushes
 * an immediate and pops it back through the stack, which faults if the
 * cached SS descriptor has become unusable.
 *
 * NOTE(review): the .pushsection .text has no matching .popsection —
 * presumably harmless because the surrounding output was already .text,
 * but worth confirming against the assembler's section-stack semantics.
 */
asm (".pushsection .text\n\t"
     ".code32\n\t"
     "test_ss:\n\t"
     "pushl $0\n\t"
     "popl %eax\n\t"
     "ret\n\t"
     ".code64");
extern void test_ss(void);
#endif

int main()
{
	/*
	 * Start a busy-looping thread on the same CPU we're on.
	 * For simplicity, just stick everything to CPU 0.  This will
	 * fail in some containers, but that's probably okay.
	 */
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
		printf("[WARN]\tsched_setaffinity failed\n");

	pthread_t thread;
	if (pthread_create(&thread, 0, threadproc, 0) != 0)
		err(1, "pthread_create");

#ifdef __x86_64__
	unsigned char *stack32 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				      MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
				      -1, 0);
	if (stack32 == MAP_FAILED)
		err(1, "mmap");
#endif

	printf("[RUN]\tSyscalls followed by SS validation\n");

	for (int i = 0; i < 1000; i++) {
		/*
		 * Go to sleep and return using sysret (if we're 64-bit
		 * or we're 32-bit on AMD on a 64-bit kernel).  On AMD CPUs,
		 * SYSRET doesn't fix up the cached SS descriptor, so the
		 * kernel needs some kind of workaround to make sure that we
		 * end the system call with a valid stack segment.  This
		 * can be a confusing failure because the SS *selector*
		 * is the same regardless.
		 */
		usleep(2);

#ifdef __x86_64__
		/*
		 * On 32-bit, just doing a syscall through glibc is enough
		 * to cause a crash if our cached SS descriptor is invalid.
		 * On 64-bit, it's not, so try extra hard.
		 */
		call32_from_64(stack32 + 4088, test_ss);
#endif
	}

	printf("[OK]\tWe survived\n");

#ifdef __x86_64__
	munmap(stack32, 4096);
#endif

	return 0;
}
+67 −0
Original line number Diff line number Diff line
/*
 * thunks.S - assembly helpers for mixed-bitness code
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * These are little helpers that make it easier to switch bitness on
 * the fly.
 */

	.text

	// call32_from_64(stack, function): run @function as 32-bit code on
	// @stack, then return to the 64-bit caller.  Clobbers the caller's
	// stack pointer only transiently; all callee-saved registers and
	// flags are preserved.
	.global call32_from_64
	.type call32_from_64, @function
call32_from_64:
	// rdi: stack to use
	// esi: function to call
	// NOTE(review): the function pointer arrives in rsi but only esi is
	// used below, so the target must reside in the low 4 GB — true for
	// test_ss in the main executable, but confirm for other callers.

	// Save registers (full callee-saved set plus flags)
	pushq %rbx
	pushq %rbp
	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15
	pushfq

	// Switch stacks: stash the old rsp at the base of the new stack so
	// it can be reloaded from (%rsp) after we come back to long mode.
	mov %rsp,(%rdi)
	mov %rdi,%rsp

	// Switch to compatibility mode via a far return through the 32-bit
	// user code segment selector.
	pushq $0x23  /* USER32_CS */
	pushq $1f
	lretq

1:
	.code32
	// Call the function
	call *%esi
	// Switch back to long mode: far jump through the 64-bit user code
	// segment selector (0x33 = USER_CS).
	jmp $0x33,$1f
	.code64

1:
	// Restore the stack (old rsp was stashed at the new stack's base)
	mov (%rsp),%rsp

	// Restore registers
	popfq
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx

	ret

.size call32_from_64, .-call32_from_64