
Commit 70da5b5c authored by Martin KaFai Lau, committed by David S. Miller

ipv6: Replace spinlock with seqlock and rcu in ip6_tunnel

This patch uses a seqlock to ensure consistency between idst->dst and
idst->cookie.  It also makes dst freeing from the fib tree undergo an
rcu grace period.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8e3d5be7
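
For readers who have not met the pattern, the commit combines two techniques: a seqlock lets lockless readers detect that a writer changed the (dst, cookie) pair underneath them and retry, while RCU keeps the dst memory alive until no reader can still be dereferencing it. Below is a minimal userspace analogue of the seqlock half. It is illustrative only: the struct and function names are invented, a single writer is assumed (the kernel's seqlock_t serializes writers with an internal spinlock), and the kernel handles SMP memory ordering far more carefully than this sketch.

/*
 * Build: cc -std=c11 seqlock_demo.c
 * The writer makes the counter odd while updating the paired fields,
 * then even again; a reader retries if it saw an odd count or the
 * count changed mid-read, so it never sees dst from one write paired
 * with cookie from another.  Note: the plain accesses to dst/cookie
 * are formally data races in C11; the kernel version uses barriers to
 * make the equivalent accesses safe.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct dst_slot {
	atomic_uint seq;	/* even: stable, odd: write in progress */
	void *dst;		/* paired field 1 */
	uint32_t cookie;	/* paired field 2 */
};

static void slot_set(struct dst_slot *s, void *dst, uint32_t cookie)
{
	atomic_fetch_add(&s->seq, 1);	/* counter goes odd */
	s->dst = dst;
	s->cookie = cookie;
	atomic_fetch_add(&s->seq, 1);	/* counter goes even again */
}

static void slot_get(struct dst_slot *s, void **dst, uint32_t *cookie)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*dst = s->dst;
		*cookie = s->cookie;
	} while ((start & 1) || atomic_load(&s->seq) != start);
}

int main(void)
{
	struct dst_slot s = { 0 };
	void *d;
	uint32_t c;

	slot_set(&s, &s, 42);
	slot_get(&s, &d, &c);
	printf("dst=%p cookie=%u\n", d, c);
	return 0;
}

The property that matters for this patch: readers never block the writer and never observe a torn pair, which is exactly what the per-cpu dst cache needs once its spinlock is gone from the read path.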
include/net/ip6_tunnel.h  +2 −2
@@ -33,8 +33,8 @@ struct __ip6_tnl_parm {
 };
 
 struct ip6_tnl_dst {
-	spinlock_t lock;
-	struct dst_entry *dst;
+	seqlock_t lock;
+	struct dst_entry __rcu *dst;
 	u32 cookie;
 };
 
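Two mechanisms now share responsibility for this structure, and the split is worth spelling out: the seqlock guarantees that readers see dst and cookie as a matched pair, while the __rcu annotation (checked by sparse) records that the pointer may only be published and loaded through the RCU accessors. A kernel-style sketch of the resulting accessor discipline, as shorthand for the ip6_tunnel.c hunks further below rather than code quoted from the patch:

	/* Writer side: publish a new pair under the seqlock. */
	write_seqlock_bh(&idst->lock);
	idst->cookie = new_cookie;
	rcu_assign_pointer(idst->dst, new_dst);	/* ordered publication */
	write_sequnlock_bh(&idst->lock);

	/* Reader side: no lock, just rcu_read_lock() plus seq retry. */
	rcu_read_lock();
	do {
		seq = read_seqbegin(&idst->lock);
		dst = rcu_dereference(idst->dst);	/* sparse-checked */
		cookie = idst->cookie;
	} while (read_seqretry(&idst->lock, seq));
	rcu_read_unlock();
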
net/ipv6/ip6_fib.c  +7 −2
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
 	kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void rt6_rcu_free(struct rt6_info *rt)
+{
+	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
 	int cpu;
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
 		pcpu_rt = *ppcpu_rt;
 		if (pcpu_rt) {
-			dst_free(&pcpu_rt->dst);
+			rt6_rcu_free(pcpu_rt);
 			*ppcpu_rt = NULL;
 		}
 	}
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
 {
 	if (atomic_dec_and_test(&rt->rt6i_ref)) {
 		rt6_free_pcpu(rt);
-		dst_free(&rt->dst);
+		rt6_rcu_free(rt);
 	}
 }
 
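
Why a helper instead of a straight dst_free(): once ip6_tnl_dst_get() reads idst->dst without taking a lock, a route could otherwise be reclaimed while a reader still holds the pointer. call_rcu() defers the actual free until every RCU read-side critical section that might have seen the object has exited. The general shape of the idiom, shown with a hypothetical struct foo for illustration:

	struct foo {
		struct rcu_head rcu;
		/* ... payload that lockless readers may still use ... */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		/* runs after a grace period: no reader can see the object */
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *f)
	{
		call_rcu(&f->rcu, foo_free_rcu);	/* defer, don't free now */
	}

In the hunk above, dst_rcu_free() plays the role of foo_free_rcu(), with the rcu_head embedded in the dst_entry.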

net/ipv6/ip6_tunnel.c  +27 −24
@@ -126,45 +126,48 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-static void __ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-				      struct dst_entry *dst)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+				    struct dst_entry *dst)
 {
-	dst_release(idst->dst);
+	write_seqlock_bh(&idst->lock);
+	dst_release(rcu_dereference_protected(
+			    idst->dst,
+			    lockdep_is_held(&idst->lock.lock)));
 	if (dst) {
 		dst_hold(dst);
 		idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
 	} else {
 		idst->cookie = 0;
 	}
-	idst->dst = dst;
-}
-
-static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
-				    struct dst_entry *dst)
-{
-
-	spin_lock_bh(&idst->lock);
-	__ip6_tnl_per_cpu_dst_set(idst, dst);
-	spin_unlock_bh(&idst->lock);
+	rcu_assign_pointer(idst->dst, dst);
+	write_sequnlock_bh(&idst->lock);
 }
 
 struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
 {
 	struct ip6_tnl_dst *idst;
 	struct dst_entry *dst;
+	unsigned int seq;
+	u32 cookie;
 
 	idst = raw_cpu_ptr(t->dst_cache);
-	spin_lock_bh(&idst->lock);
-	dst = idst->dst;
-	if (dst) {
-		if (!dst->obsolete || dst->ops->check(dst, idst->cookie)) {
-			dst_hold(idst->dst);
-		} else {
-			__ip6_tnl_per_cpu_dst_set(idst, NULL);
-			dst = NULL;
-		}
+
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&idst->lock);
+		dst = rcu_dereference(idst->dst);
+		cookie = idst->cookie;
+	} while (read_seqretry(&idst->lock, seq));
+
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
+	rcu_read_unlock();
+
+	if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+		ip6_tnl_per_cpu_dst_set(idst, NULL);
+		dst_release(dst);
+		dst = NULL;
 	}
-	spin_unlock_bh(&idst->lock);
 	return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
@@ -204,7 +207,7 @@ int ip6_tnl_dst_init(struct ip6_tnl *t)
 		return -ENOMEM;
 
 	for_each_possible_cpu(i)
-		spin_lock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+		seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
 
 	return 0;
 }
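
One subtlety in the new read path of ip6_tnl_dst_get() deserves a note: the reader cannot simply dst_hold() whatever it found, because without the spinlock it can race with rt6_rcu_free() and resurrect a dst whose refcount already reached zero. atomic_inc_not_zero() succeeds only while the object is still live. A condensed, kernel-style restatement of the idiom (illustrative, not quoted from the patch):

	rcu_read_lock();
	dst = rcu_dereference(idst->dst);
	/* RCU keeps the memory valid, but the refcount may be zero;
	 * take a reference only if one still exists. */
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;	/* lost the race: treat as a cache miss */
	rcu_read_unlock();

RCU alone only guarantees the memory stays valid until rcu_read_unlock(); a reference that outlives the read-side section is exactly what the conditional increment supplies.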