Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 892ad5ac authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add CONFIG_REFCOUNT_FULL=y to allow the disabling of the 'full'
     (robustness checked) refcount_t implementation with slightly lower
     runtime overhead. (Kees Cook)

     The lighter weight variant is the default. The two variants use the
     same API. Having this variant was a precondition by some
     maintainers to merge refcount_t cleanups.

   - Add lockdep support for rtmutexes (Peter Zijlstra)

   - liblockdep fixes and improvements (Sasha Levin, Ben Hutchings)

   - ... misc fixes and improvements"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  locking/refcount: Remove the half-implemented refcount_sub() API
  locking/refcount: Create unchecked atomic_t implementation
  locking/rtmutex: Don't initialize lockdep when not required
  locking/selftest: Add RT-mutex support
  locking/selftest: Remove the bad unlock ordering test
  rt_mutex: Add lockdep annotations
  MAINTAINERS: Claim atomic*_t maintainership
  locking/x86: Remove the unused atomic_inc_short() methd
  tools/lib/lockdep: Remove private kernel headers
  tools/lib/lockdep: Hide liblockdep output from test results
  tools/lib/lockdep: Add dummy current_gfp_context()
  tools/include: Add IS_ERR_OR_NULL to err.h
  tools/lib/lockdep: Add empty __is_[module,kernel]_percpu_address
  tools/lib/lockdep: Include err.h
  tools/include: Add (mostly) empty include/linux/sched/mm.h
  tools/lib/lockdep: Use LDFLAGS
  tools/lib/lockdep: Remove double-quotes from soname
  tools/lib/lockdep: Fix object file paths used in an out-of-tree build
  tools/lib/lockdep: Fix compilation for 4.11
  tools/lib/lockdep: Don't mix fd-based and stream IO
  ...
parents 162b246e 5d6dec6f
Loading
Loading
Loading
Loading
+10 −1
Original line number Original line Diff line number Diff line
@@ -2322,6 +2322,15 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F:	drivers/input/touchscreen/atmel_mxt_ts.c
F:	drivers/input/touchscreen/atmel_mxt_ts.c
F:	include/linux/platform_data/atmel_mxt_ts.h
F:	include/linux/platform_data/atmel_mxt_ts.h


ATOMIC INFRASTRUCTURE
M:	Will Deacon <will.deacon@arm.com>
M:	Peter Zijlstra <peterz@infradead.org>
R:	Boqun Feng <boqun.feng@gmail.com>
L:	linux-kernel@vger.kernel.org
S:	Maintained
F:	arch/*/include/asm/atomic*.h
F:	include/*/atomic*.h

ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
M:	Bradley Grove <linuxdrivers@attotech.com>
M:	Bradley Grove <linuxdrivers@attotech.com>
L:	linux-scsi@vger.kernel.org
L:	linux-scsi@vger.kernel.org
@@ -7555,7 +7564,7 @@ S: Maintained
F:	drivers/ata/sata_promise.*
F:	drivers/ata/sata_promise.*


LIBLOCKDEP
LIBLOCKDEP
M:	Sasha Levin <sasha.levin@oracle.com>
M:	Sasha Levin <alexander.levin@verizon.com>
S:	Maintained
S:	Maintained
F:	tools/lib/lockdep/
F:	tools/lib/lockdep/


+9 −0
Original line number Original line Diff line number Diff line
@@ -867,4 +867,13 @@ config STRICT_MODULE_RWX
config ARCH_WANT_RELAX_ORDER
config ARCH_WANT_RELAX_ORDER
	bool
	bool


config REFCOUNT_FULL
	bool "Perform full reference count validation at the expense of speed"
	help
	  Enabling this switches the refcounting infrastructure from a fast
	  unchecked atomic_t implementation to a fully state checked
	  implementation, which can be (slightly) slower but provides protections
	  against various use-after-free conditions that can be used in
	  security flaw exploits.

source "kernel/gcov/Kconfig"
source "kernel/gcov/Kconfig"
+1 −2
Original line number Original line Diff line number Diff line
@@ -24,8 +24,7 @@
 * has an opportunity to return -EFAULT to the user if needed.
 * has an opportunity to return -EFAULT to the user if needed.
 * The 64-bit routines just return a "long long" with the value,
 * The 64-bit routines just return a "long long" with the value,
 * since they are only used from kernel space and don't expect to fault.
 * since they are only used from kernel space and don't expect to fault.
 * Support for 16-bit ops is included in the framework but we don't provide
 * Support for 16-bit ops is included in the framework but we don't provide any.
 * any (x86_64 has an atomic_inc_short(), so we might want to some day).
 *
 *
 * Note that the caller is advised to issue a suitable L1 or L2
 * Note that the caller is advised to issue a suitable L1 or L2
 * prefetch on the address being manipulated to avoid extra stalls.
 * prefetch on the address being manipulated to avoid extra stalls.
+0 −13
Original line number Original line Diff line number Diff line
@@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
	return c;
	return c;
}
}


/**
 * atomic_inc_short - increment of a short integer
 * @v: pointer to type short int
 *
 * Atomically adds 1 to @v
 * Returns the new value of @v
 *
 * NOTE(review): the increment itself is locked, but the trailing read of
 * *v is a separate plain load, so the returned value may not be the one
 * produced by this increment under concurrent updates.
 */
static __always_inline short int atomic_inc_short(short int *v)
{
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;
}

#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
# include <asm/atomic64_32.h>
#else
#else
+37 −1
Original line number Original line Diff line number Diff line
@@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
	return atomic_read(&r->refs);
	return atomic_read(&r->refs);
}
}


#ifdef CONFIG_REFCOUNT_FULL
extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
extern void refcount_add(unsigned int i, refcount_t *r);
extern void refcount_add(unsigned int i, refcount_t *r);


@@ -48,10 +49,45 @@ extern __must_check bool refcount_inc_not_zero(refcount_t *r);
extern void refcount_inc(refcount_t *r);
extern void refcount_inc(refcount_t *r);


extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern void refcount_sub(unsigned int i, refcount_t *r);


extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern void refcount_dec(refcount_t *r);
extern void refcount_dec(refcount_t *r);
#else
/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: add @i to @r unless the
 * count is 0; returns false (no add) when it was 0. No saturation or
 * overflow protection (see the REFCOUNT_FULL Kconfig help).
 */
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: unconditional add of @i,
 * with none of the zero/saturation checks of the full variant.
 */
static inline void refcount_add(unsigned int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: increment @r unless it is
 * 0; returns false (no increment) when it was 0.
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: unconditional increment,
 * no inc-from-zero (use-after-free) detection.
 */
static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: subtract @i and return
 * true when the resulting count is 0. No underflow detection.
 */
static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: decrement and return true
 * when the resulting count is 0 (caller may then free the object).
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

/*
 * Unchecked (!CONFIG_REFCOUNT_FULL) fast path: unconditional decrement,
 * no dec-to-zero/underflow detection.
 */
static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
#endif /* CONFIG_REFCOUNT_FULL */


extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
Loading