Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f191a1d1 authored by Vlad Yasevich, committed by David S. Miller
Browse files

net: Remove code duplication between offload structures



Move the offload callbacks into its own structure.

Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c6b641a4
Loading
Loading
Loading
Loading
+7 −3
Original line number Original line Diff line number Diff line
@@ -1515,14 +1515,18 @@ struct packet_type {
	struct list_head	list;
	struct list_head	list;
};
};


struct packet_offload {
struct offload_callbacks {
	__be16			type;	/* This is really htons(ether_type). */
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						netdev_features_t features);
						netdev_features_t features);
	int			(*gso_send_check)(struct sk_buff *skb);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
					       struct sk_buff *skb);
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
};

struct packet_offload {
	__be16			 type;	/* This is really htons(ether_type). */
	struct offload_callbacks callbacks;
	struct list_head	 list;
	struct list_head	 list;
};
};


+3 −7
Original line number Original line Diff line number Diff line
@@ -29,6 +29,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#include <linux/ipv6.h>
#endif
#endif
#include <linux/netdevice.h>


/* This is one larger than the largest protocol value that can be
/* This is one larger than the largest protocol value that can be
 * found in an ipv4 or ipv6 header.  Since in both cases the protocol
 * found in an ipv4 or ipv6 header.  Since in both cases the protocol
@@ -63,12 +64,7 @@ struct inet6_protocol {
#endif
#endif


struct net_offload {
struct net_offload {
	int			(*gso_send_check)(struct sk_buff *skb);
	struct offload_callbacks callbacks;
	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
					       netdev_features_t features);
	struct sk_buff	      **(*gro_receive)(struct sk_buff **head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	unsigned int		 flags;	/* Flags used by IPv6 for now */
	unsigned int		 flags;	/* Flags used by IPv6 for now */
};
};
/* This should be set for any extension header which is compatible with GSO. */
/* This should be set for any extension header which is compatible with GSO. */
+7 −7
Original line number Original line Diff line number Diff line
@@ -2102,16 +2102,16 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,


	rcu_read_lock();
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->gso_segment) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
				if (err || skb_gso_ok(skb, features))
					break;
					break;
				__skb_push(skb, (skb->data -
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
						 skb_network_header(skb)));
			}
			}
			segs = ptype->gso_segment(skb, features);
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
			break;
		}
		}
	}
	}
@@ -3533,10 +3533,10 @@ static int napi_gro_complete(struct sk_buff *skb)


	rcu_read_lock();
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->gro_complete)
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
			continue;


		err = ptype->gro_complete(skb);
		err = ptype->callbacks.gro_complete(skb);
		break;
		break;
	}
	}
	rcu_read_unlock();
	rcu_read_unlock();
@@ -3598,7 +3598,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)


	rcu_read_lock();
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->gro_receive)
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
			continue;


		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_set_network_header(skb, skb_gro_offset(skb));
@@ -3608,7 +3608,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->free = 0;


		pp = ptype->gro_receive(&napi->gro_list, skb);
		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
		break;
	}
	}
	rcu_read_unlock();
	rcu_read_unlock();
+24 −18
Original line number Original line Diff line number Diff line
@@ -1276,8 +1276,8 @@ static int inet_gso_send_check(struct sk_buff *skb)


	rcu_read_lock();
	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->gso_send_check))
	if (likely(ops && ops->callbacks.gso_send_check))
		err = ops->gso_send_check(skb);
		err = ops->callbacks.gso_send_check(skb);
	rcu_read_unlock();
	rcu_read_unlock();


out:
out:
@@ -1326,8 +1326,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,


	rcu_read_lock();
	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->gso_segment))
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->gso_segment(skb, features);
		segs = ops->callbacks.gso_segment(skb, features);
	rcu_read_unlock();
	rcu_read_unlock();


	if (!segs || IS_ERR(segs))
	if (!segs || IS_ERR(segs))
@@ -1379,7 +1379,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,


	rcu_read_lock();
	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->gro_receive)
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;
		goto out_unlock;


	if (*(u8 *)iph != 0x45)
	if (*(u8 *)iph != 0x45)
@@ -1420,7 +1420,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
	skb_gro_pull(skb, sizeof(*iph));
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));
	skb_set_transport_header(skb, skb_gro_offset(skb));


	pp = ops->gro_receive(head, skb);
	pp = ops->callbacks.gro_receive(head, skb);


out_unlock:
out_unlock:
	rcu_read_unlock();
	rcu_read_unlock();
@@ -1444,10 +1444,10 @@ static int inet_gro_complete(struct sk_buff *skb)


	rcu_read_lock();
	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	ops = rcu_dereference(inet_offloads[proto]);
	if (WARN_ON(!ops || !ops->gro_complete))
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;
		goto out_unlock;


	err = ops->gro_complete(skb);
	err = ops->callbacks.gro_complete(skb);


out_unlock:
out_unlock:
	rcu_read_unlock();
	rcu_read_unlock();
@@ -1563,10 +1563,12 @@ static const struct net_protocol tcp_protocol = {
};
};


static const struct net_offload tcp_offload = {
static const struct net_offload tcp_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_tso_segment,
		.gso_segment	=	tcp_tso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
		.gro_complete	=	tcp4_gro_complete,
	},
};
};


static const struct net_protocol udp_protocol = {
static const struct net_protocol udp_protocol = {
@@ -1577,8 +1579,10 @@ static const struct net_protocol udp_protocol = {
};
};


static const struct net_offload udp_offload = {
static const struct net_offload udp_offload = {
	.callbacks = {
		.gso_send_check = udp4_ufo_send_check,
		.gso_send_check = udp4_ufo_send_check,
		.gso_segment = udp4_ufo_fragment,
		.gso_segment = udp4_ufo_fragment,
	},
};
};


static const struct net_protocol icmp_protocol = {
static const struct net_protocol icmp_protocol = {
@@ -1667,10 +1671,12 @@ static int ipv4_proc_init(void);


static struct packet_offload ip_packet_offload __read_mostly = {
static struct packet_offload ip_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_send_check = inet_gso_send_check,
		.gso_send_check = inet_gso_send_check,
		.gso_segment = inet_gso_segment,
		.gso_segment = inet_gso_segment,
		.gro_receive = inet_gro_receive,
		.gro_receive = inet_gro_receive,
		.gro_complete = inet_gro_complete,
		.gro_complete = inet_gro_complete,
	},
};
};


static int __init ipv4_offload_init(void)
static int __init ipv4_offload_init(void)
+15 −13
Original line number Original line Diff line number Diff line
@@ -70,9 +70,9 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
	ops = rcu_dereference(inet6_offloads[
	ops = rcu_dereference(inet6_offloads[
		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);


	if (likely(ops && ops->gso_send_check)) {
	if (likely(ops && ops->callbacks.gso_send_check)) {
		skb_reset_transport_header(skb);
		skb_reset_transport_header(skb);
		err = ops->gso_send_check(skb);
		err = ops->callbacks.gso_send_check(skb);
	}
	}
	rcu_read_unlock();
	rcu_read_unlock();


@@ -113,9 +113,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
	rcu_read_lock();
	rcu_read_lock();
	ops = rcu_dereference(inet6_offloads[proto]);
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->gso_segment)) {
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		skb_reset_transport_header(skb);
		segs = ops->gso_segment(skb, features);
		segs = ops->callbacks.gso_segment(skb, features);
	}
	}
	rcu_read_unlock();
	rcu_read_unlock();


@@ -173,7 +173,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
	rcu_read_lock();
	rcu_read_lock();
	proto = iph->nexthdr;
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->gro_receive) {
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		__pskb_pull(skb, skb_gro_offset(skb));
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_gro_pull(skb, -skb_transport_offset(skb));
@@ -181,7 +181,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
		__skb_push(skb, skb_gro_offset(skb));
		__skb_push(skb, skb_gro_offset(skb));


		ops = rcu_dereference(inet6_offloads[proto]);
		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->gro_receive)
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;
			goto out_unlock;


		iph = ipv6_hdr(skb);
		iph = ipv6_hdr(skb);
@@ -220,7 +220,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
	csum = skb->csum;
	csum = skb->csum;
	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));


	pp = ops->gro_receive(head, skb);
	pp = ops->callbacks.gro_receive(head, skb);


	skb->csum = csum;
	skb->csum = csum;


@@ -244,10 +244,10 @@ static int ipv6_gro_complete(struct sk_buff *skb)


	rcu_read_lock();
	rcu_read_lock();
	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
	ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]);
	if (WARN_ON(!ops || !ops->gro_complete))
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;
		goto out_unlock;


	err = ops->gro_complete(skb);
	err = ops->callbacks.gro_complete(skb);


out_unlock:
out_unlock:
	rcu_read_unlock();
	rcu_read_unlock();
@@ -257,10 +257,12 @@ out_unlock:


static struct packet_offload ipv6_packet_offload __read_mostly = {
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
		.gro_complete = ipv6_gro_complete,
	},
};
};


static int __init ipv6_offload_init(void)
static int __init ipv6_offload_init(void)
Loading