Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0565de29 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'ipv6-Separate-data-structures-for-FIB-and-data-path'



David Ahern says:

====================
net/ipv6: Separate data structures for FIB and data path

IPv6 uses the same data struct for both control plane (FIB entries) and
data path (dst entries). This struct has elements needed for both paths
adding memory overhead and complexity (taking a dst hold in most places
but an additional reference on rt6i_ref in a few). Furthermore, because
of the dst_alloc tie, all FIB entries are allocated with GFP_ATOMIC.

This patch set separates FIB entries from dst entries, better aligning
IPv6 code with IPv4, simplifying the reference counting and allowing
FIB entries added by userspace (not autoconf) to use GFP_KERNEL. It is
the first step toward a number of performance and scalability changes.

The end result of this patch set:
  - FIB entries (fib6_info):
        /* size: 208, cachelines: 4, members: 25 */
        /* sum members: 207, holes: 1, sum holes: 1 */

  - dst entries (rt6_info)
       /* size: 240, cachelines: 4, members: 11 */

Versus the single rt6_info struct used today for both paths:
      /* size: 320, cachelines: 5, members: 28 */

This amounts to a 35% reduction in memory use for FIB entries and a
25% reduction for dst entries.

With respect to locking, FIB entries use RCU and a single atomic
counter with fib6_info_hold and fib6_info_release helpers to manage
the reference counting. dst entries use only the traditional dst
refcounts with dst_hold and dst_release.

FIB entries for host routes are referenced by inet6_ifaddr and
ifacaddr6. In both cases, additional holds are taken -- similar to
what is done for devices.

This set is the first of many changes to improve the scalability of the
IPv6 code. Follow on changes include:
- consolidating duplicate fib6_info references like IPv4 does with
  duplicate fib_info

- moving fib6_info into a slab cache to avoid allocation roundups to
  power of 2 (the 208 size becomes a 256 actual allocation)

- Allow FIB lookups without generating a dst (e.g., most rt6_lookup
  users just want to verify the egress device). Means moving dst
  allocation to the other side of fib6_rule_lookup which again aligns
  with IPv4 behavior

- using separate standalone nexthop objects which have performance
  benefits beyond fib_info consolidation

At this point I am not seeing any refcount leaks or underflows, no
oops or bug_ons, or warnings from kasan, so I think it is ready for
others to beat up on it finding errors in code paths I have missed.

v2 changes
- rebased to top of tree
- improved commit message on patch 7

v1 changes
- rebased to top of tree
- fix memory leak of metrics as noted by Ido
- MTU fixes based on pmtu tests (thanks Stefano Brivio for writing)

RFC v2 changes
- improved commit messages
- move common metrics code from dst.c to net/ipv4/metrics.c (comment
  from DaveM)
- address comments from Wei Wang and Martin KaFai Lau (let me know if
  I missed something)
- fixes detected by kernel test robots
  + added fib6_metric_set to change metric on a FIB entry which could
    be pointing to read-only dst_default_metrics
  + 0day testing found a problem with an intermediate patch; added
    dst_hold_safe on rt->from. Code is removed 3 patches later
- allow cacheinfo to handle NULL dst; means only expires is pushed to
  userspace
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents a2d481b3 77634cc6
Loading
Loading
Loading
Loading
+48 −48
Original line number Diff line number Diff line
@@ -442,7 +442,7 @@ struct mlxsw_sp_fib6_entry {

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
@@ -2770,9 +2770,9 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
		struct in6_addr *gw;
		int ifindex, weight;

		ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
		weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
		gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
		ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
		weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
		gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
							 weight))
			return false;
@@ -2838,7 +2838,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
	struct net_device *dev;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		dev = mlxsw_sp_rt6->rt->dst.dev;
		dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
		val ^= dev->ifindex;
	}

@@ -3834,11 +3834,11 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		struct rt6_info *rt = mlxsw_sp_rt6->rt;
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->dst.dev &&
		if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->rt6i_gateway))
				    &rt->fib6_nh.nh_gw))
			return nh;
		continue;
	}
@@ -3895,7 +3895,7 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
				 list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

@@ -3905,9 +3905,9 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
			mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
		else
			mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
			mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

@@ -3920,9 +3920,9 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
		rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

@@ -4699,7 +4699,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Packets with link-local destination IP arriving to the router
	 * are trapped to the CPU, so no need to program specific routes
@@ -4721,7 +4721,7 @@ static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
	return false;
}

static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

@@ -4734,18 +4734,18 @@ static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	rt6_hold(rt);
	fib6_info_hold(rt);

	return mlxsw_sp_rt6;
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	rt6_release(rt);
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif
@@ -4756,13 +4756,13 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
	kfree(mlxsw_sp_rt6);
}

static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
	/* RTF_CACHE routes are ignored */
	return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}

static struct rt6_info *
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
@@ -4771,7 +4771,7 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
				 const struct rt6_info *nrt, bool replace)
				 const struct fib6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

@@ -4779,7 +4779,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
		 * virtual router.
@@ -4802,7 +4802,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct rt6_info *rt)
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

@@ -4815,21 +4815,21 @@ mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct rt6_info *rt,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->dst.dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
	return rt->fib6_nh.nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
}

static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct rt6_info *rt)
				       const struct fib6_info *rt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct net_device *dev = rt->dst.dev;
	struct net_device *dev = rt->fib6_nh.nh_dev;
	struct mlxsw_sp_rif *rif;
	int err;

@@ -4870,13 +4870,13 @@ static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct rt6_info *rt)
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->dst.dev;
	struct net_device *dev = rt->fib6_nh.nh_dev;

	nh->nh_grp = nh_grp;
	nh->nh_weight = rt->rt6i_nh_weight;
	memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
	nh->nh_weight = rt->fib6_nh.nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -4897,7 +4897,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct rt6_info *rt)
				    const struct fib6_info *rt)
{
	return rt->rt6i_flags & RTF_GATEWAY ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
@@ -4928,7 +4928,7 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
@@ -5040,7 +5040,7 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
				struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;
@@ -5068,7 +5068,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
				struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

@@ -5084,7 +5084,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,

static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 const struct rt6_info *rt)
					 const struct fib6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
@@ -5118,7 +5118,7 @@ mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct rt6_info *rt)
			   struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
@@ -5168,12 +5168,12 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct rt6_info *nrt, bool replace)
			      const struct fib6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
@@ -5198,7 +5198,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
			       bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
	struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
@@ -5213,7 +5213,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
		struct mlxsw_sp_fib6_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
			struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);

			if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
				break;
@@ -5268,7 +5268,7 @@ mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct rt6_info *rt)
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
@@ -5287,7 +5287,7 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
		struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
		    rt->rt6i_metric == iter_rt->rt6i_metric &&
@@ -5316,7 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
}

static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
				    struct rt6_info *rt, bool replace)
				    struct fib6_info *rt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
@@ -5373,7 +5373,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
}

static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct rt6_info *rt)
				     struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
@@ -5836,7 +5836,7 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		fib_work->fen6_info = *fen6_info;
		rt6_hold(fib_work->fen6_info.rt);
		fib6_info_hold(fib_work->fen6_info.rt);
		break;
	}
}
+6 −19
Original line number Diff line number Diff line
@@ -48,6 +48,9 @@ static unsigned int vrf_net_id;
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table	*fib6_table;
#endif
	u32                     tb_id;
};

@@ -496,7 +499,6 @@ static int vrf_rt6_create(struct net_device *dev)
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6;
	int rc = -ENOMEM;

@@ -504,8 +506,8 @@ static int vrf_rt6_create(struct net_device *dev)
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
@@ -513,7 +515,6 @@ static int vrf_rt6_create(struct net_device *dev)
	if (!rt6)
		goto out;

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);
@@ -946,22 +947,8 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, skb, flags);
	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
+2 −2
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ struct inet6_ifaddr {
	struct delayed_work	dad_work;

	struct inet6_dev	*idev;
	struct rt6_info		*rt;
	struct fib6_info	*rt;

	struct hlist_node	addr_lst;
	struct list_head	if_list;
@@ -144,7 +144,7 @@ struct ipv6_ac_socklist {
struct ifacaddr6 {
	struct in6_addr		aca_addr;
	struct inet6_dev	*aca_idev;
	struct rt6_info		*aca_rt;
	struct fib6_info	*aca_rt;
	struct ifacaddr6	*aca_next;
	int			aca_users;
	refcount_t		aca_refcnt;
+3 −0
Original line number Diff line number Diff line
@@ -396,6 +396,9 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
		       u32 *metrics);

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

+91 −60
Original line number Diff line number Diff line
@@ -38,6 +38,7 @@
#endif

struct rt6_info;
struct fib6_info;

struct fib6_config {
	u32		fc_table;
@@ -74,12 +75,12 @@ struct fib6_node {
#ifdef CONFIG_IPV6_SUBTREES
	struct fib6_node __rcu	*subtree;
#endif
	struct rt6_info __rcu	*leaf;
	struct fib6_info __rcu	*leaf;

	__u16			fn_bit;		/* bit key */
	__u16			fn_flags;
	int			fn_sernum;
	struct rt6_info __rcu	*rr_ptr;
	struct fib6_info __rcu	*rr_ptr;
	struct rcu_head		rcu;
};

@@ -94,11 +95,6 @@ struct fib6_gc_args {
#define FIB6_SUBTREE(fn)	(rcu_dereference_protected((fn)->subtree, 1))
#endif

struct mx6_config {
	const u32 *mx;
	DECLARE_BITMAP(mx_valid, RTAX_MAX);
};

/*
 *	routing information
 *
@@ -127,56 +123,72 @@ struct rt6_exception {
#define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT)
#define FIB6_MAX_DEPTH 5

struct rt6_info {
	struct dst_entry		dst;
	struct rt6_info __rcu		*rt6_next;
	struct rt6_info			*from;
struct fib6_nh {
	struct in6_addr		nh_gw;
	struct net_device	*nh_dev;
	struct lwtunnel_state	*nh_lwtstate;

	/*
	 * Tail elements of dst_entry (__refcnt etc.)
	 * and these elements (rarely used in hot path) are in
	 * the same cache line.
	 */
	unsigned int		nh_flags;
	atomic_t		nh_upper_bound;
	int			nh_weight;
};

struct fib6_info {
	struct fib6_table		*rt6i_table;
	struct fib6_info __rcu		*rt6_next;
	struct fib6_node __rcu		*rt6i_node;

	struct in6_addr			rt6i_gateway;

	/* Multipath routes:
	 * siblings is a list of rt6_info that have the the same metric/weight,
	 * siblings is a list of fib6_info that have the the same metric/weight,
	 * destination, but not the same gateway. nsiblings is just a cache
	 * to speed up lookup.
	 */
	struct list_head		rt6i_siblings;
	unsigned int			rt6i_nsiblings;
	atomic_t			rt6i_nh_upper_bound;

	atomic_t			rt6i_ref;
	struct inet6_dev		*rt6i_idev;
	unsigned long			expires;
	struct dst_metrics		*fib6_metrics;
#define fib6_pmtu		fib6_metrics->metrics[RTAX_MTU-1]

	unsigned int			rt6i_nh_flags;

	/* These are in a separate cache line. */
	struct rt6key			rt6i_dst ____cacheline_aligned_in_smp;
	struct rt6key			rt6i_dst;
	u32				rt6i_flags;
	struct rt6key			rt6i_src;
	struct rt6key			rt6i_prefsrc;

	struct list_head		rt6i_uncached;
	struct uncached_list		*rt6i_uncached_list;

	struct inet6_dev		*rt6i_idev;
	struct rt6_info * __percpu	*rt6i_pcpu;
	struct rt6_exception_bucket __rcu *rt6i_exception_bucket;

	u32				rt6i_metric;
	u32				rt6i_pmtu;
	/* more non-fragment space at head required */
	int				rt6i_nh_weight;
	unsigned short			rt6i_nfheader_len;
	u8				rt6i_protocol;
	u8				fib6_type;
	u8				exception_bucket_flushed:1,
					should_flush:1,
					unused:6;
					dst_nocount:1,
					dst_nopolicy:1,
					dst_host:1,
					unused:3;

	struct fib6_nh			fib6_nh;
};

struct rt6_info {
	struct dst_entry		dst;
	struct fib6_info		*from;

	struct rt6key			rt6i_dst;
	struct rt6key			rt6i_src;
	struct in6_addr			rt6i_gateway;
	struct inet6_dev		*rt6i_idev;
	u32				rt6i_flags;
	struct rt6key			rt6i_prefsrc;

	struct list_head		rt6i_uncached;
	struct uncached_list		*rt6i_uncached_list;

	/* more non-fragment space at head required */
	unsigned short			rt6i_nfheader_len;
};

#define for_each_fib6_node_rt_rcu(fn)					\
@@ -192,6 +204,26 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
	return ((struct rt6_info *)dst)->rt6i_idev;
}

static inline void fib6_clean_expires(struct fib6_info *f6i)
{
	f6i->rt6i_flags &= ~RTF_EXPIRES;
	f6i->expires = 0;
}

static inline void fib6_set_expires(struct fib6_info *f6i,
				    unsigned long expires)
{
	f6i->expires = expires;
	f6i->rt6i_flags |= RTF_EXPIRES;
}

static inline bool fib6_check_expired(const struct fib6_info *f6i)
{
	if (f6i->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, f6i->expires);
	return false;
}

static inline void rt6_clean_expires(struct rt6_info *rt)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
@@ -206,11 +238,9 @@ static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)

static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	struct rt6_info *rt;
	if (!(rt0->rt6i_flags & RTF_EXPIRES) && rt0->from)
		rt0->dst.expires = rt0->from->expires;

	for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
	if (rt && rt != rt0)
		rt0->dst.expires = rt->dst.expires;
	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}
@@ -220,7 +250,7 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
 * Return true if we can get cookie safely
 * Return false if not
 */
static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
static inline bool rt6_get_cookie_safe(const struct fib6_info *rt,
				       u32 *cookie)
{
	struct fib6_node *fn;
@@ -246,9 +276,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
		rt = rt->from;

	rt6_get_cookie_safe(rt, &cookie);
		rt6_get_cookie_safe(rt->from, &cookie);

	return cookie;
}
@@ -262,20 +290,18 @@ static inline void ip6_rt_put(struct rt6_info *rt)
	dst_release(&rt->dst);
}

void rt6_free_pcpu(struct rt6_info *non_pcpu_rt);
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
void fib6_info_destroy(struct fib6_info *f6i);

static inline void rt6_hold(struct rt6_info *rt)
static inline void fib6_info_hold(struct fib6_info *f6i)
{
	atomic_inc(&rt->rt6i_ref);
	atomic_inc(&f6i->rt6i_ref);
}

static inline void rt6_release(struct rt6_info *rt)
static inline void fib6_info_release(struct fib6_info *f6i)
{
	if (atomic_dec_and_test(&rt->rt6i_ref)) {
		rt6_free_pcpu(rt);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	if (f6i && atomic_dec_and_test(&f6i->rt6i_ref))
		fib6_info_destroy(f6i);
}

enum fib6_walk_state {
@@ -291,7 +317,7 @@ enum fib6_walk_state {
struct fib6_walker {
	struct list_head lh;
	struct fib6_node *root, *node;
	struct rt6_info *leaf;
	struct fib6_info *leaf;
	enum fib6_walk_state state;
	unsigned int skip;
	unsigned int count;
@@ -355,7 +381,7 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,

struct fib6_entry_notifier_info {
	struct fib_notifier_info info; /* must be first */
	struct rt6_info *rt;
	struct fib6_info *rt;
};

/*
@@ -377,15 +403,14 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
			      const struct in6_addr *saddr, int src_len,
			      bool exact_match);

void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
		    void *arg);

int fib6_add(struct fib6_node *root, struct rt6_info *rt,
	     struct nl_info *info, struct mx6_config *mxc,
	     struct netlink_ext_ack *extack);
int fib6_del(struct rt6_info *rt, struct nl_info *info);
int fib6_add(struct fib6_node *root, struct fib6_info *rt,
	     struct nl_info *info, struct netlink_ext_ack *extack);
int fib6_del(struct fib6_info *rt, struct nl_info *info);

void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int flags);

void fib6_run_gc(unsigned long expires, struct net *net, bool force);
@@ -408,8 +433,14 @@ void __net_exit fib6_notifier_exit(struct net *net);
unsigned int fib6_tables_seq_read(struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb);

void fib6_update_sernum(struct rt6_info *rt);
void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
void fib6_update_sernum(struct net *net, struct fib6_info *rt);
void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);

void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val);
static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
{
	return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric));
}

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
int fib6_rules_init(void);
Loading