
Commit c2189e3a authored by Linus Torvalds
Pull percpu changes from Tejun Heo:
 "Nothing too interesting.  One cleanup patch and another to add a
  trivial state check function"

* 'for-3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu_ref: implement percpu_ref_is_dying()
  percpu_ref: remove unnecessary ACCESS_ONCE() in percpu_ref_tryget_live()
parents ed824a62 4c907baf
1 file changed: +31 −3
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
					  unsigned long __percpu **percpu_countp)
 {
-	/* paired with smp_store_release() in percpu_ref_reinit() */
-	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+	unsigned long percpu_ptr;
+
+	/*
+	 * The value of @ref->percpu_count_ptr is tested for
+	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
+	 * used as a pointer.  If the compiler generates a separate fetch
+	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+	 * between contaminating the pointer value, meaning that
+	 * ACCESS_ONCE() is required when fetching it.
+	 *
+	 * Also, we need a data dependency barrier to be paired with
+	 * smp_store_release() in __percpu_ref_switch_to_percpu().
+	 *
+	 * Use lockless deref which contains both.
+	 */
+	percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
 	/*
 	 * Theoretically, the following could test just ATOMIC; however,
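
The comment added in this hunk explains why a plain load would be wrong here and why lockless_dereference() is the right primitive: it bundles a one-time fetch with the data dependency barrier that pairs with the writer's smp_store_release(). As a rough illustration (a paraphrase, not the verbatim kernel macro), it behaves like the sketch below; the name lockless_dereference_sketch and the temporary __val are invented for this note:

	/*
	 * Illustrative paraphrase of lockless_dereference(p): fetch the
	 * value exactly once so the compiler cannot refetch it, then issue
	 * a data dependency barrier before the value is used as a pointer.
	 */
	#define lockless_dereference_sketch(p)					\
	({									\
		typeof(p) __val = ACCESS_ONCE(p);	/* single fetch */	\
		smp_read_barrier_depends();	/* vs. smp_store_release() */	\
		(__val);							\
	})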
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
-	} else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
 		ret = atomic_long_inc_not_zero(&ref->count);
 	}
 
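
The ACCESS_ONCE() dropped in this hunk is the "unnecessary" one from the shortlog: unlike in __ref_is_percpu() above, the value is only tested for the __PERCPU_REF_DEAD bit and is never reused as a pointer, so a plain load suffices and the result can race with percpu_ref_kill() either way. For orientation, a typical caller of percpu_ref_tryget_live() follows the pattern sketched below; start_request() and do_work() are hypothetical names:

	/* Hypothetical caller: accept new work only while the ref is live. */
	static int start_request(struct percpu_ref *ref)
	{
		if (!percpu_ref_tryget_live(ref))
			return -ENODEV;		/* ref is already dying or dead */

		do_work();			/* hypothetical work */
		percpu_ref_put(ref);		/* drop the temporary reference */
		return 0;
	}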
@@ -280,6 +294,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 	percpu_ref_put_many(ref, 1);
 }
 
+/**
+ * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref is dying or dead.
+ *
+ * This function is safe to call as long as @ref is between init and exit
+ * and the caller is responsible for synchronizing against state changes.
+ */
+static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
+{
+	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
+}
+
 /**
  * percpu_ref_is_zero - test whether a percpu refcount reached zero
  * @ref: percpu_ref to test
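
percpu_ref_is_dying() is the "trivial state check function" mentioned in the merge message. Because its kerneldoc leaves synchronization to the caller, it would normally be checked under whatever lock also serializes percpu_ref_kill() and percpu_ref_reinit(). A hypothetical sketch (struct my_gate, its lock and ref members, and my_gate_shutdown() are invented for illustration):

	/* Hypothetical: start shutdown exactly once, under the caller's lock. */
	static void my_gate_shutdown(struct my_gate *gate)
	{
		spin_lock(&gate->lock);
		if (!percpu_ref_is_dying(&gate->ref))
			percpu_ref_kill(&gate->ref);	/* first caller wins */
		spin_unlock(&gate->lock);
	}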