Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3ee72ca9 authored by Linus Torvalds
Browse files
* git://github.com/davem330/net:
  net: fix typos in Documentation/networking/scaling.txt
  bridge: leave carrier on for empty bridge
  netfilter: Use proper rwlock init function
  tcp: properly update lost_cnt_hint during shifting
  tcp: properly handle md5sig_pool references
  macvlan/macvtap: Fix unicast between macvtap interfaces in bridge mode
parents 29cf7a30 186c6bbc
Loading
Loading
Loading
Loading
+5 −5
Original line number Original line Diff line number Diff line
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
the other scaling techniques to increase performance uniformly.
the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.
that is not the focus of these techniques.


@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.
a single application thread handles flows with many different flow hashes.


rps_sock_table is a global flow table that contains the *desired* CPU for
rps_sock_flow_table is a global flow table that contains the *desired* CPU
flows: the CPU that is currently processing the flow in userspace. Each
for flows: the CPU that is currently processing the flow in userspace.
table value is a CPU index that is updated during calls to recvmsg and
Each table value is a CPU index that is updated during calls to recvmsg
sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
and tcp_splice_read()).


When the scheduler moves a thread to a new CPU while it has outstanding
When the scheduler moves a thread to a new CPU while it has outstanding
+1 −1
Original line number Original line Diff line number Diff line
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
		dest = macvlan_hash_lookup(port, eth->h_dest);
		dest = macvlan_hash_lookup(port, eth->h_dest);
		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
			/* send to lowerdev first for its network taps */
			/* send to lowerdev first for its network taps */
			vlan->forward(vlan->lowerdev, skb);
			dev_forward_skb(vlan->lowerdev, skb);


			return NET_XMIT_SUCCESS;
			return NET_XMIT_SUCCESS;
		}
		}
+0 −3
Original line number Original line Diff line number Diff line
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
{
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge *br = netdev_priv(dev);


	netif_carrier_off(dev);
	netdev_update_features(dev);
	netdev_update_features(dev);
	netif_start_queue(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_stp_enable_bridge(br);
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
{
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge *br = netdev_priv(dev);


	netif_carrier_off(dev);

	br_stp_disable_bridge(br);
	br_stp_disable_bridge(br);
	br_multicast_stop(br);
	br_multicast_stop(br);


+1 −3
Original line number Original line Diff line number Diff line
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,


	BUG_ON(!pcount);
	BUG_ON(!pcount);


	/* Tweak before seqno plays */
	if (skb == tp->lost_skb_hint)
	if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
	    !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
		tp->lost_cnt_hint += pcount;
		tp->lost_cnt_hint += pcount;


	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(prev)->end_seq += shifted;
+7 −4
Original line number Original line Diff line number Diff line
@@ -927,17 +927,20 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
			}
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {

		md5sig = tp->md5sig_info;
		if (md5sig->entries4 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			kfree(newkey);
			return -ENOMEM;
			return -ENOMEM;
		}
		}
		md5sig = tp->md5sig_info;


		if (md5sig->alloced4 == md5sig->entries4) {
		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
			if (!keys) {
				kfree(newkey);
				kfree(newkey);
				if (md5sig->entries4 == 0)
					tcp_free_md5sig_pool();
					tcp_free_md5sig_pool();
				return -ENOMEM;
				return -ENOMEM;
			}
			}
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
				kfree(tp->md5sig_info->keys4);
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
				tp->md5sig_info->alloced4 = 0;
				tcp_free_md5sig_pool();
			} else if (tp->md5sig_info->entries4 != i) {
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
				memmove(&tp->md5sig_info->keys4[i],
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
					(tp->md5sig_info->entries4 - i) *
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
					 sizeof(struct tcp4_md5sig_key));
			}
			}
			tcp_free_md5sig_pool();
			return 0;
			return 0;
		}
		}
	}
	}
Loading