Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a052858f authored by H. Peter Anvin
Browse files

x86, uaccess: Merge prototypes for clear_user/__clear_user



The prototypes for clear_user() and __clear_user() are identical in
the 32- and 64-bit headers.  No functionality change.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1348256595-29119-8-git-send-email-hpa@linux.intel.com
parent 51ae4a2d
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -569,6 +569,9 @@ strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
extern __must_check long strnlen_user(const char __user *str, long n);


unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
 */
+0 −3
Original line number Original line Diff line number Diff line
@@ -213,7 +213,4 @@ static inline unsigned long __must_check copy_from_user(void *to,
	return n;
	return n;
}
}


unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_X86_UACCESS_32_H */
#endif /* _ASM_X86_UACCESS_32_H */
+0 −3
Original line number Original line Diff line number Diff line
@@ -217,9 +217,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
	}
	}
}
}


__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

static __must_check __always_inline int
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
{