Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9a4aa9af authored by Tom Herbert, committed by David S. Miller
Browse files

ipv4: Use percpu Cache route in IP tunnels



The percpu route cache eliminates sharing of the dst refcount between CPUs.

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d442fab
Loading
Loading
Loading
Loading
+6 −2
Original line number Diff line number Diff line
@@ -38,6 +38,11 @@ struct ip_tunnel_prl_entry {
	struct rcu_head			rcu_head;
};

/*
 * One per-cpu slot of an IP tunnel's route cache.
 * @dst:  cached dst_entry for this CPU; read under RCU, may be NULL.
 * @lock: serializes writers replacing @dst (see __tunnel_dst_set below).
 */
struct ip_tunnel_dst {
	struct dst_entry __rcu 		*dst;
	spinlock_t			lock;
};

struct ip_tunnel {
	struct ip_tunnel __rcu	*next;
	struct hlist_node hash_node;
@@ -54,8 +59,7 @@ struct ip_tunnel {
	int		hlen;		/* Precalculated header length */
	int		mlink;

	struct		dst_entry __rcu *dst_cache;
	spinlock_t	dst_lock;
	struct ip_tunnel_dst __percpu *dst_cache;

	struct ip_tunnel_parm parms;

+32 −12
Original line number Diff line number Diff line
@@ -68,23 +68,24 @@ static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
			 IP_TNL_HASH_BITS);
}

/*
 * NOTE(review): this is a rendered diff hunk with removed and added lines
 * interleaved (no +/- markers survived scraping).  The duplicated
 * signature and lock/dereference lines below are the pre-patch vs
 * post-patch versions; only one of each exists in the real file.
 *
 * Replace the cached dst with @dst, dropping the cache's reference to
 * the old entry.  Uncacheable (DST_NOCACHE) entries are stored as NULL.
 */
static inline void __tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
static inline void __tunnel_dst_set(struct ip_tunnel_dst *idst,
				    struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	/* Never cache a dst the routing core marked uncacheable. */
	if (dst && (dst->flags & DST_NOCACHE))
		dst = NULL;

	/* Pre-patch lines: single per-tunnel cache under t->dst_lock. */
	spin_lock_bh(&t->dst_lock);
	old_dst = rcu_dereference_raw(t->dst_cache);
	rcu_assign_pointer(t->dst_cache, dst);
	/* Post-patch lines: per-cpu slot under its own idst->lock. */
	spin_lock_bh(&idst->lock);
	old_dst = rcu_dereference(idst->dst);
	rcu_assign_pointer(idst->dst, dst);
	/* Drop the reference the cache held on the replaced entry. */
	dst_release(old_dst);
	spin_unlock_bh(&t->dst_lock);
	spin_unlock_bh(&idst->lock);
}

/*
 * Cache @dst for the current CPU.
 * NOTE(review): the two calls below are the removed pre-patch line and
 * the added post-patch line of the diff, respectively — the real file
 * contains only one of them.
 */
static inline void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
{
	__tunnel_dst_set(t, dst);
	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
}

static inline void tunnel_dst_reset(struct ip_tunnel *t)
@@ -92,12 +93,20 @@ static inline void tunnel_dst_reset(struct ip_tunnel *t)
	tunnel_dst_set(t, NULL);
}

static void tunnel_dst_reset_all(struct ip_tunnel *t)
{
	int i;

	for_each_possible_cpu(i)
		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}

/*
 * Fetch the cached dst for the current CPU, taking a reference for the
 * caller (or NULL if the slot is empty).
 * NOTE(review): the two `dst =` lines are the removed pre-patch and
 * added post-patch diff lines; also, the hunk is truncated here — the
 * closing `return dst; }` was cut off by the next @@ header.
 */
static inline struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(t->dst_cache);
	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
	if (dst)
		dst_hold(dst);	/* hold a ref before leaving the RCU section */
	rcu_read_unlock();
@@ -755,7 +764,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
		if (set_mtu)
			dev->mtu = mtu;
	}
	tunnel_dst_reset(t);
	tunnel_dst_reset_all(t);
	netdev_state_change(dev);
}

@@ -871,6 +880,7 @@ static void ip_tunnel_dev_free(struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
@@ -1049,8 +1059,21 @@ int ip_tunnel_init(struct net_device *dev)
		u64_stats_init(&ipt_stats->syncp);
	}

	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
	if (!tunnel->dst_cache) {
		free_percpu(dev->tstats);
		return -ENOMEM;
	}

	for_each_possible_cpu(i) {
		struct ip_tunnel_dst *idst = per_cpu_ptr(tunnel->dst_cache, i);
		idst-> dst = NULL;
		spin_lock_init(&idst->lock);
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}
@@ -1061,9 +1084,6 @@ int ip_tunnel_init(struct net_device *dev)
	iph->version		= 4;
	iph->ihl		= 5;

	tunnel->dst_cache = NULL;
	spin_lock_init(&tunnel->dst_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1079,7 +1099,7 @@ void ip_tunnel_uninit(struct net_device *dev)
	if (itn->fb_tunnel_dev != dev)
		ip_tunnel_del(netdev_priv(dev));

	tunnel_dst_reset(tunnel);
	tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);