Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c035ea0 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'net-hash-tx'



Tom Herbert says:

====================
net: Improvements and applications of packet flow hash in transmit path

This patch series includes some patches which improve and make use
of skb->hash in the transmit path.

What is included:

- Infrastructure to save a precomputed hash in the sock structure.
  For connected TCP and UDP sockets we only need to compute the
  flow hash once and not once for every packet.
- Call skb_get_hash in get_xps_queue and __skb_tx_hash. This eliminates
  the awkward access to skb->sk->sk_hash in the lower transmit path.
- Move UDP source port generation into a common function in udp.h This
  implementation is mostly based on vxlan_src_port.
- Use non-zero IPv6 flow labels in flow_dissector as port information
  for flow hash calculation.
- Implement automatic flow label generation on transmit (per RFC 6438).
- Don't repeatedly try to compute an L4 hash in skb_get_hash if we've
  already tried to find one in software stack calculation.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 081a20ff a3b18ddb
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -1132,6 +1132,15 @@ flowlabel_consistency - BOOLEAN
	FALSE: disabled
	Default: TRUE

auto_flowlabels - BOOLEAN
	Automatically generate flow labels based on a flow hash
	of the packet. This allows intermediate devices, such as routers,
	to identify packet flows for mechanisms like Equal Cost Multipath
	Routing (see RFC 6438).
	TRUE: enabled
	FALSE: disabled
	Default: FALSE

anycast_src_echo_reply - BOOLEAN
	Controls the use of anycast addresses as source addresses for ICMPv6
	echo reply
+2 −24
Original line number Diff line number Diff line
@@ -1570,25 +1570,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
	return false;
}

/* Compute the UDP source port for an outgoing encapsulated packet.
 *
 * Port selection strategy:
 *   - first choice is the L4 flow hash (skb_get_hash), since it spreads
 *     flows better and may already be available from hardware RSS;
 *   - if no flow hash is available (skb_get_hash returned 0), fall back
 *     to a jhash over the first two Ethernet addresses (dst + src MAC)
 *     of the inner frame, seeded with the frame's protocol.
 *
 * The hash is then scaled into [port_min, port_max] via a 32.32
 * fixed-point multiply ((hash * range) >> 32), which maps the full
 * 32-bit hash space uniformly onto the configured port range without
 * a modulo bias from a simple "hash % range".
 *
 * Returns the chosen port in network byte order.
 */
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
{
	unsigned int range = (port_max - port_min) + 1;
	u32 hash;

	hash = skb_get_hash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return htons((((u64) hash * range) >> 32) + port_min);
}
EXPORT_SYMBOL_GPL(vxlan_src_port);

static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
						    bool udp_csum)
{
@@ -1807,7 +1788,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
				     vxlan->port_max, true);

	if (dst->sa.sa_family == AF_INET) {
		memset(&fl4, 0, sizeof(fl4));
@@ -2235,7 +2217,6 @@ static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
@@ -2272,9 +2253,6 @@ static void vxlan_setup(struct net_device *dev)
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(dev_net(dev), &low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;
	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;
+2 −1
Original line number Diff line number Diff line
@@ -199,7 +199,8 @@ struct ipv6_pinfo {
						 * 010: prefer public address
						 * 100: prefer care-of address
						 */
				dontfrag:1;
				dontfrag:1,
				autoflowlabel:1;
	__u8			min_hopcount;
	__u8			tclass;
	__be32			rcv_flowinfo;
+1 −1
Original line number Diff line number Diff line
@@ -2486,7 +2486,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
			      struct sk_buff *skb)
{
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}
+8 −3
Original line number Diff line number Diff line
@@ -455,6 +455,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
 *		ports.
 *	@sw_hash: indicates hash was computed in software stack
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
@@ -562,6 +563,7 @@ struct sk_buff {
	__u8			pfmemalloc:1;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
@@ -575,7 +577,7 @@ struct sk_buff {
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	/* 3/5 bit hole (depending on ndisc_nodetype presence) */
	/* 2/4 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -830,13 +832,14 @@ static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	skb->l4_hash = (type == PKT_HASH_TYPE_L4);
	skb->sw_hash = 0;
	skb->hash = hash;
}

void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash)
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
@@ -850,6 +853,7 @@ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
/* Invalidate the skb's cached flow hash: zero the hash value and clear
 * both validity flags (sw_hash: hash was computed in the software stack;
 * l4_hash: hash is a canonical 4-tuple L4 hash), so the next
 * skb_get_hash() call will recompute the hash from scratch.
 */
static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

@@ -862,6 +866,7 @@ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
/* Copy the cached flow hash state from one skb to another: the hash
 * value itself plus both qualifier flags (sw_hash: computed by the
 * software stack; l4_hash: canonical 4-tuple L4 hash), so the
 * destination skb inherits exactly the same hash validity as the
 * source and skb_get_hash() on it behaves identically.
 */
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

@@ -3005,7 +3010,7 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
	return skb->queue_mapping != 0;
}

u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues);

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
Loading