Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c176c46 authored by Peter Zijlstra, committed by Alistair Delva
Browse files

BACKPORT: x86/uaccess: Introduce user_access_{save,restore}()



(Upstream commit e74deb11931ff682b59d5b9d387f7115f689698e).

Introduce common helpers for when we need to safely suspend a
uaccess section; for instance to generate a {KA,UB}SAN report.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Change-Id: I9a1afec5d437689dc9a976b371448c7e81811724
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Bug: 128674696
parent 0ee9b439
Loading
Loading
Loading
Loading
+20 −0
Original line number Diff line number Diff line
@@ -58,6 +58,23 @@ static __always_inline void stac(void)
	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
}

/*
 * Save EFLAGS (notably EFLAGS.AC) and disable user-space access with CLAC,
 * so a uaccess section can be temporarily suspended (e.g. to emit a
 * KASAN/UBSAN report) and later resumed via smap_restore().
 *
 * The previous form patched the whole "pushf; pop; clac" sequence in via
 * ALTERNATIVE, which means one alternative arm modifies the stack and the
 * other does not -- objtool/ORC unwinding cannot represent that
 * ("alternative modifies stack").  Instead, keep the stack-modifying
 * instructions unconditional in the instruction stream and patch in only a
 * jump that skips them when the CPU lacks SMAP (matches the upstream fix
 * in commit 1ff865e343c2).
 *
 * When !X86_FEATURE_SMAP, @flags is returned uninitialized; that is fine
 * because the only consumer, smap_restore(), skips the restore on the same
 * condition.
 */
static __always_inline unsigned long smap_save(void)
{
	unsigned long flags;

	asm volatile ("# smap_save\n\t"
		      ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
		      "pushf; pop %0; " __stringify(__ASM_CLAC) "\n\t"
		      "1:"
		      : "=rm" (flags) : : "memory", "cc");

	return flags;
}

/*
 * Restore the EFLAGS value saved by smap_save(), re-enabling a suspended
 * uaccess section if EFLAGS.AC was set at save time.
 *
 * As with smap_save(), do not place the stack-modifying "push; popf" inside
 * an ALTERNATIVE arm (objtool cannot unwind an alternative that modifies
 * the stack); make it unconditional and patch in a jump over it when the
 * CPU lacks SMAP (upstream fix: commit 1ff865e343c2).
 */
static __always_inline void smap_restore(unsigned long flags)
{
	asm volatile ("# smap_restore\n\t"
		      ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
		      "push %0; popf\n\t"
		      "1:"
		      : : "g" (flags) : "memory", "cc");
}

/* These macros can be used in asm() statements */
#define ASM_CLAC \
	ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
@@ -69,6 +86,9 @@ static __always_inline void stac(void)
static inline void clac(void) { }
static inline void stac(void) { }

/* !CONFIG_X86_SMAP: no AC flag state exists, so there is nothing to save. */
static inline unsigned long smap_save(void)
{
	return 0;
}
/* !CONFIG_X86_SMAP: nothing was saved, so restoring is a no-op. */
static inline void smap_restore(unsigned long flags)
{
	(void)flags;
}

#define ASM_CLAC
#define ASM_STAC

+3 −0
Original line number Diff line number Diff line
@@ -724,6 +724,9 @@ static __must_check inline bool user_access_begin(int type,
#define user_access_begin(a, b, c)	user_access_begin(a, b, c)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
+2 −0
Original line number Diff line number Diff line
@@ -271,6 +271,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
/*
 * Fallback when the architecture provides no user_access_save(): there is
 * no uaccess state to suspend, so hand back a dummy flags value.
 */
static inline unsigned long user_access_save(void)
{
	return 0UL;
}
/*
 * Fallback when the architecture provides no user_access_restore():
 * accept and discard @flags; nothing was suspended, nothing to resume.
 */
static inline void user_access_restore(unsigned long flags)
{
	(void)flags;
}
#endif

#ifdef CONFIG_HARDENED_USERCOPY