Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 42e7c229 authored by Will Deacon, committed by Bernhard Thoben
Browse files

locking/refcount: Define constants for saturation and max refcount values



The REFCOUNT_FULL implementation uses a different saturation point than
the x86 implementation, which means that the shared refcount code in
lib/refcount.c (e.g. refcount_dec_not_one()) needs to be aware of the
difference.

Rather than duplicate the definitions from the lkdtm driver, instead
move them into <linux/refcount.h> and update all references accordingly.

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191121115902.2551-2-will@kernel.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 23e6b169c9917fbd77534f8c5f378cb073f548bd)
parent 811cf3dd
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -5,14 +5,6 @@
#include "lkdtm.h"
#include <linux/refcount.h>

#ifdef CONFIG_REFCOUNT_FULL
#define REFCOUNT_MAX		(UINT_MAX - 1)
#define REFCOUNT_SATURATED	UINT_MAX
#else
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)
#endif

static void overflow_check(refcount_t *ref)
{
	switch (refcount_read(ref)) {
+13 −1
Original line number Diff line number Diff line
@@ -3,6 +3,7 @@

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;
@@ -11,7 +12,7 @@ struct mutex;
 * refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at UINT_MAX and will not move once
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
@@ -55,6 +56,9 @@ extern void refcount_dec_checked(refcount_t *r);

#ifdef CONFIG_REFCOUNT_FULL

#define REFCOUNT_MAX		(UINT_MAX - 1)
#define REFCOUNT_SATURATED	UINT_MAX

#define refcount_add_not_zero	refcount_add_not_zero_checked
#define refcount_add		refcount_add_checked

@@ -68,6 +72,14 @@ extern void refcount_sub(unsigned int i, refcount_t *r);
#define refcount_dec		refcount_dec_checked

#else


#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

# ifdef CONFIG_ARCH_HAS_REFCOUNT
#  include <asm/refcount.h>
# else
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
+20 −17
Original line number Diff line number Diff line
@@ -4,8 +4,8 @@
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * It differs in that the counter saturates at REFCOUNT_SATURATED and will not
 * move once there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
@@ -47,7 +47,7 @@
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
@@ -68,16 +68,17 @@ bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;
			new = REFCOUNT_SATURATED;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}
@@ -88,7 +89,7 @@ EXPORT_SYMBOL(refcount_add_not_zero_checked);
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
@@ -109,7 +110,8 @@ EXPORT_SYMBOL(refcount_add_checked);
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
@@ -132,7 +134,8 @@ bool refcount_inc_not_zero_checked(refcount_t *r)

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
	WARN_ONCE(new == REFCOUNT_SATURATED,
		  "refcount_t: saturated; leaking memory.\n");

	return true;
}
@@ -142,7 +145,7 @@ EXPORT_SYMBOL(refcount_inc_not_zero_checked);
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object.
@@ -163,7 +166,7 @@ EXPORT_SYMBOL(refcount_inc_checked);
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at UINT_MAX.
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
@@ -181,7 +184,7 @@ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
		if (unlikely(val == REFCOUNT_SATURATED))
			return false;

		new = val - i;
@@ -206,7 +209,7 @@ EXPORT_SYMBOL(refcount_sub_and_test_checked);
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
@@ -225,7 +228,7 @@ EXPORT_SYMBOL(refcount_dec_and_test_checked);
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
@@ -276,7 +279,7 @@ bool refcount_dec_not_one(refcount_t *r)
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
@@ -301,7 +304,7 @@ EXPORT_SYMBOL(refcount_dec_not_one);
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
@@ -332,7 +335,7 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.