
Commit c10d38dd authored by Nick Piggin, committed by Ingo Molnar

x86: some lock annotations for user copy paths



copy_to/from_user and all their variants (except the atomic ones) can take a
page fault and perform non-trivial work like taking mmap_sem and entering
the filesystem / pagecache.
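That slow path is invisible at the call site. Condensed from the x86 fault
handler of this era (arch/x86/mm/fault.c; a sketch with access checks and
error handling stripped, where mm == current->mm), a faulting user access
does roughly:

	down_read(&mm->mmap_sem);	/* the lock the annotations below record */
	vma = find_vma(mm, address);
	/*
	 * handle_mm_fault() may end up in vma->vm_ops->fault(), e.g.
	 * filemap_fault(), i.e. filesystem / pagecache work that can
	 * block for a long time.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	up_read(&mm->mmap_sem);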

Unfortunately, this often escapes lockdep because a common pattern is to
use these calls to read in arguments just set up from userspace, or to write
data back to a hot buffer. In those cases it is unlikely that page reclaim
will get a window in which to cause copy_*_user to fault.
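To make the failure mode concrete, here is a hypothetical driver snippet
(invented for illustration; dev_priv and its lock are not from this patch)
where the deadlock can only bite if the user buffer happens to be paged out
at exactly the wrong moment:

	#include <linux/mutex.h>
	#include <linux/uaccess.h>

	struct dev_priv {
		struct mutex lock;	/* assume: also taken in the device's writeback path */
		u64 stats;
	};

	static long dev_read_stats(struct dev_priv *dev, char __user *buf)
	{
		long ret = 0;

		mutex_lock(&dev->lock);
		/*
		 * If 'buf' is paged out, this faults, takes mmap_sem for
		 * read and may enter the filesystem, which can wait on
		 * dev->lock: deadlock. On a hot buffer the fault almost
		 * never happens, so lockdep never sees the dependency
		 * from this call site without the annotations below.
		 */
		if (copy_to_user(buf, &dev->stats, sizeof(dev->stats)))
			ret = -EFAULT;
		mutex_unlock(&dev->lock);
		return ret;
	}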

With the new might_lock primitives, add some annotations to x86. I don't
know if I caught all possible faulting points (it's a bit of a maze, and I
didn't really look at 32-bit). But this is a starting point.
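For reference, might_lock_read() comes from the parent commit and is
essentially a lockdep-only acquire/release pair: it records a "this code may
take the lock for read" dependency at the call site and releases it
immediately, without ever blocking. Roughly (sketched from memory of that
era's <linux/lockdep.h>; the exact lock_acquire() argument values are an
assumption):

	#define might_lock_read(lock)						\
	do {									\
		typecheck(struct lockdep_map *, &(lock)->dep_map);		\
		/* tell lockdep we "took" the lock for read... */		\
		lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
		/* ...and dropped it again; no lock is actually taken */	\
		lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
	} while (0)

The 'if (current->mm)' guard in the hunks below exists because kernel
threads have no mm, so there is no mmap_sem to annotate.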

Boots and runs OK so far.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 76b189e9
+6 −1
@@ -33,6 +33,8 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
do {									   \
	int __d0, __d1, __d2;						   \
	might_sleep();							   \
+	if (current->mm)						   \
+		might_lock_read(&current->mm->mmap_sem);		   \
	__asm__ __volatile__(						   \
		"	testl %1,%1\n"					   \
		"	jz 2f\n"					   \
@@ -120,6 +122,8 @@ EXPORT_SYMBOL(strncpy_from_user);
do {									\
	int __d0;							\
	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
	__asm__ __volatile__(						\
		"0:	rep; stosl\n"					\
		"	movl %2,%0\n"					\
@@ -148,7 +152,6 @@ do { \
unsigned long
clear_user(void __user *to, unsigned long n)
{
-	might_sleep();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
@@ -191,6 +194,8 @@ long strnlen_user(const char __user *s, long n)
	unsigned long res, tmp;

	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);

	__asm__ __volatile__(
		"	testl %0, %0\n"
+4 −0
@@ -16,6 +16,8 @@
do {									   \
	long __d0, __d1, __d2;						   \
	might_sleep();							   \
+	if (current->mm)						   \
+		might_lock_read(&current->mm->mmap_sem);		   \
	__asm__ __volatile__(						   \
		"	testq %1,%1\n"					   \
		"	jz 2f\n"					   \
@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	asm volatile(
+14 −0
@@ -8,6 +8,8 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
+#include <linux/lockdep.h>
+#include <linux/sched.h>
#include <asm/asm.h>
#include <asm/page.h>

@@ -157,6 +159,9 @@ extern int __get_user_bad(void);
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
+	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
@@ -241,6 +246,9 @@ extern void __put_user_8(void);
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
+	might_sleep();						\
+	if (current->mm)					\
+		might_lock_read(&current->mm->mmap_sem);	\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
@@ -265,6 +273,9 @@ extern void __put_user_8(void);
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
+	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
@@ -317,6 +328,9 @@ do { \
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
+	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
+8 −2
@@ -83,6 +83,8 @@ static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	return __copy_to_user_inatomic(to, from, n);
}

@@ -138,6 +140,8 @@ static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

@@ -160,6 +164,8 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

+12 −0
@@ -28,6 +28,10 @@ static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
@@ -70,6 +74,10 @@ static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
@@ -112,6 +120,10 @@ static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);