Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3ee1afa3 authored by Nick Piggin, committed by Ingo Molnar
Browse files

x86: some lock annotations for user copy paths, v2



 - introduce might_fault()
 - handle the atomic user copy paths correctly

[ mingo@elte.hu: move might_sleep() outside of in_atomic(). ]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c10d38dd
Loading
Loading
Loading
Loading
+3 −9
Original line number Diff line number Diff line
@@ -32,9 +32,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#define __do_strncpy_from_user(dst, src, count, res)			   \
do {									   \
	int __d0, __d1, __d2;						   \
	might_sleep();							   \
	if (current->mm)						   \
		might_lock_read(&current->mm->mmap_sem);		   \
	might_fault();							   \
	__asm__ __volatile__(						   \
		"	testl %1,%1\n"					   \
		"	jz 2f\n"					   \
@@ -121,9 +119,7 @@ EXPORT_SYMBOL(strncpy_from_user);
#define __do_clear_user(addr,size)					\
do {									\
	int __d0;							\
	might_sleep();							\
	if (current->mm)						\
		might_lock_read(&current->mm->mmap_sem);		\
	might_fault();							\
	__asm__ __volatile__(						\
		"0:	rep; stosl\n"					\
		"	movl %2,%0\n"					\
@@ -193,9 +189,7 @@ long strnlen_user(const char __user *s, long n)
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();

	__asm__ __volatile__(
		"	testl %0, %0\n"
+2 −6
Original line number Diff line number Diff line
@@ -15,9 +15,7 @@
#define __do_strncpy_from_user(dst,src,count,res)			   \
do {									   \
	long __d0, __d1, __d2;						   \
	might_sleep();							   \
	if (current->mm)						   \
		might_lock_read(&current->mm->mmap_sem);		   \
	might_fault();							   \
	__asm__ __volatile__(						   \
		"	testq %1,%1\n"					   \
		"	jz 2f\n"					   \
@@ -66,9 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	asm volatile(
+4 −14
Original line number Diff line number Diff line
@@ -8,8 +8,6 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <linux/lockdep.h>
#include <linux/sched.h>
#include <asm/asm.h>
#include <asm/page.h>

@@ -159,9 +157,7 @@ extern int __get_user_bad(void);
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	might_sleep();							\
	if (current->mm)						\
		might_lock_read(&current->mm->mmap_sem);		\
	might_fault();							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
@@ -246,9 +242,7 @@ extern void __put_user_8(void);
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	if (current->mm)					\
		might_lock_read(&current->mm->mmap_sem);	\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
@@ -273,9 +267,7 @@ extern void __put_user_8(void);
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	might_sleep();							\
	if (current->mm)						\
		might_lock_read(&current->mm->mmap_sem);		\
	might_fault();							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
@@ -328,9 +320,7 @@ do { \
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	might_sleep();							\
	if (current->mm)						\
		might_lock_read(&current->mm->mmap_sem);		\
	might_fault();							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
+3 −9
Original line number Diff line number Diff line
@@ -82,9 +82,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

@@ -139,9 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

@@ -163,9 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

+3 −9
Original line number Diff line number Diff line
@@ -29,9 +29,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
@@ -75,9 +73,7 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
@@ -121,9 +117,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
Loading