Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e13053f5 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull voluntary preemption fixes from Ingo Molnar:
 "This tree contains a speedup which is achieved through better
  might_sleep()/might_fault() preemption point annotations for uaccess
  functions, by Michael S. Tsirkin:

  1. The only reason uaccess routines might sleep is if they fault.
     Make this explicit for all architectures.

  2. A voluntary preemption point in uaccess functions means compiler
     can't inline them efficiently, this breaks assumptions that they
     are very fast and small that e.g.  net code seems to make.  Remove
     this preemption point so behaviour matches with what callers
     assume.

  3. Accesses (e.g through socket ops) to kernel memory with KERNEL_DS
     like net/sunrpc does will never sleep.  Remove an unconditional
     might_sleep() in the might_fault() inline in kernel.h (used when
     PROVE_LOCKING is not set).

  4. Accesses with pagefault_disable() return EFAULT but won't cause
     caller to sleep.  Check for that and thus avoid might_sleep() when
     PROVE_LOCKING is set.

  These changes offer a nice speedup for CONFIG_PREEMPT_VOLUNTARY=y
  kernels, here's a network bandwidth measurement between a virtual
  machine and the host:

   before:
        incoming: 7122.77   Mb/s
        outgoing: 8480.37   Mb/s

   after:
        incoming: 8619.24   Mb/s   [ +21.0% ]
        outgoing: 9455.42   Mb/s   [ +11.5% ]

  I kept these changes in a separate tree, separate from scheduler
  changes, because it's a mixed MM and scheduler topic"

* 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm, sched: Allow uaccess in atomic with pagefault_disable()
  mm, sched: Drop voluntary schedule from might_fault()
  x86: uaccess s/might_sleep/might_fault/
  tile: uaccess s/might_sleep/might_fault/
  powerpc: uaccess s/might_sleep/might_fault/
  mn10300: uaccess s/might_sleep/might_fault/
  microblaze: uaccess s/might_sleep/might_fault/
  m32r: uaccess s/might_sleep/might_fault/
  frv: uaccess s/might_sleep/might_fault/
  arm64: uaccess s/might_sleep/might_fault/
  asm-generic: uaccess s/might_sleep/might_fault/
parents 2d722f6d 662bbcb2
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -166,7 +166,7 @@ do { \

#define get_user(x, ptr)						\
({									\
	might_sleep();							\
	might_fault();							\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?			\
		__get_user((x), (ptr)) :				\
		((x) = 0, -EFAULT);					\
@@ -227,7 +227,7 @@ do { \

#define put_user(x, ptr)						\
({									\
	might_sleep();							\
	might_fault();							\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__put_user((x), (ptr)) :				\
		-EFAULT;						\
+2 −2
Original line number Diff line number Diff line
@@ -280,14 +280,14 @@ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
       might_sleep();
       might_fault();
       return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
       might_sleep();
       might_fault();
       return __copy_from_user_inatomic(to, from, n);
}

+6 −6
Original line number Diff line number Diff line
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
({									\
	long __gu_err = 0;						\
	unsigned long __gu_val;						\
	might_sleep();							\
	might_fault();							\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);		\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_sleep();							\
	might_fault();							\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
@@ -295,7 +295,7 @@ do { \
#define __put_user_nocheck(x,ptr,size)					\
({									\
	long __pu_err;							\
	might_sleep();							\
	might_fault();							\
	__put_user_size((x),(ptr),(size),__pu_err);			\
	__pu_err;							\
})
@@ -305,7 +305,7 @@ do { \
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_sleep();							\
	might_fault();							\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))			\
		__put_user_size((x),__pu_addr,(size),__pu_err);		\
	__pu_err;							\
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
 */
#define copy_to_user(to,from,n)				\
({							\
	might_sleep();					\
	might_fault();					\
	__generic_copy_to_user((to),(from),(n));	\
})

@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
 */
#define copy_from_user(to,from,n)			\
({							\
	might_sleep();					\
	might_fault();					\
	__generic_copy_from_user((to),(from),(n));	\
})

+3 −3
Original line number Diff line number Diff line
@@ -145,7 +145,7 @@ static inline unsigned long __must_check __clear_user(void __user *to,
static inline unsigned long __must_check clear_user(void __user *to,
							unsigned long n)
{
	might_sleep();
	might_fault();
	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
		return n;

@@ -371,7 +371,7 @@ extern long __user_bad(void);
static inline long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	might_sleep();
	might_fault();
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user(to, from, n);
	return n;
@@ -385,7 +385,7 @@ static inline long copy_from_user(void *to,
static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_sleep();
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	return n;
+2 −2
Original line number Diff line number Diff line
@@ -471,13 +471,13 @@ extern unsigned long __generic_copy_from_user(void *, const void __user *,

#define __copy_to_user(to, from, n)			\
({							\
	might_sleep();					\
	might_fault();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_sleep();					\
	might_fault();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})

Loading