Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a1240cf7 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull percpu updates from Dennis Zhou:
 "This includes changes to let percpu_ref release the backing percpu
  memory earlier after it has been switched to atomic in cases where the
  percpu ref is not revived.

  This will help recycle percpu memory earlier in cases where the
  refcounts are pinned for prolonged periods of time"

* 'for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
  percpu_ref: release percpu memory early without PERCPU_REF_ALLOW_REINIT
  md: initialize percpu refcounters using PERCPU_REF_ALLOW_REINIT
  io_uring: initialize percpu refcounters using PERCPU_REF_ALLOW_REINIT
  percpu_ref: introduce PERCPU_REF_ALLOW_REINIT flag
parents 1d039859 7d9ab9b6
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -5316,7 +5316,8 @@ int mddev_init_writes_pending(struct mddev *mddev)
{
	if (mddev->writes_pending.percpu_count_ptr)
		return 0;
	if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
	if (percpu_ref_init(&mddev->writes_pending, no_op,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
		return -ENOMEM;
	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);
+2 −1
Original line number Diff line number Diff line
@@ -399,7 +399,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
	if (!ctx)
		return NULL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, 0, GFP_KERNEL)) {
	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}
+9 −1
Original line number Diff line number Diff line
@@ -75,14 +75,21 @@ enum {
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};

struct percpu_ref {
@@ -95,6 +102,7 @@ struct percpu_ref {
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	bool			allow_reinit:1;
	struct rcu_head		rcu;
};

+11 −2
Original line number Diff line number Diff line
@@ -70,11 +70,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
	else
		ref->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -120,6 +123,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!ref->allow_reinit)
		percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}
@@ -195,6 +201,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*