Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 63d8ea7f authored by David S. Miller
Browse files

net: Forgot to commit net/core/dev.c part of Jiri's ->rx_handler patch.

parent 5b2c4dd2
Loading
Loading
Loading
Loading
+31 −88
Original line number Original line Diff line number Diff line
@@ -3096,54 +3096,23 @@ void netdev_rx_handler_unregister(struct net_device *dev)
}
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);


static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
static void vlan_on_bond_hook(struct sk_buff *skb)
					      struct net_device *master)
{
{
	if (skb->pkt_type == PACKET_HOST) {
	/*
		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
	 * Make sure ARP frames received on VLAN interfaces stacked on

	 * bonding interfaces still make their way to any base bonding
		memcpy(dest, master->dev_addr, ETH_ALEN);
	 * device that may have registered for a specific ptype.
	}
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static int __skb_bond_should_drop(struct sk_buff *skb,
				  struct net_device *master)
{
	struct net_device *dev = skb->dev;

	if (master->priv_flags & IFF_MASTER_ARPMON)
		dev->last_rx = jiffies;

	if ((master->priv_flags & IFF_MASTER_ALB) &&
	    (master->priv_flags & IFF_BRIDGE_PORT)) {
		/* Do address unmangle. The local destination address
		 * will be always the one master has. Provides the right
		 * functionality in a bridge.
	 */
	 */
		skb_bond_set_mac_by_master(skb, master);
	if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
	}
	    vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
	    skb->protocol == htons(ETH_P_ARP)) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);


	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
		if (!skb2)
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
			return;
		    skb->protocol == __cpu_to_be16(ETH_P_ARP))
		skb2->dev = vlan_dev_real_dev(skb->dev);
			return 0;
		netif_rx(skb2);

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
			return 0;

		return 1;
	}
	}
	return 0;
}
}


static int __netif_receive_skb(struct sk_buff *skb)
static int __netif_receive_skb(struct sk_buff *skb)
@@ -3151,8 +3120,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
	struct packet_type *ptype, *pt_prev;
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	struct net_device *null_or_dev;
	struct net_device *orig_or_bond;
	int ret = NET_RX_DROP;
	int ret = NET_RX_DROP;
	__be16 type;
	__be16 type;


@@ -3167,32 +3135,8 @@ static int __netif_receive_skb(struct sk_buff *skb)


	if (!skb->skb_iif)
	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;
		skb->skb_iif = skb->dev->ifindex;

	/*
	 * bonding note: skbs received on inactive slaves should only
	 * be delivered to pkt handlers that are exact matches.  Also
	 * the deliver_no_wcard flag will be set.  If packet handlers
	 * are sensitive to duplicate packets these skbs will need to
	 * be dropped at the handler.
	 */
	null_or_orig = NULL;
	orig_dev = skb->dev;
	orig_dev = skb->dev;
	if (skb->deliver_no_wcard)
		null_or_orig = orig_dev;
	else if (netif_is_bond_slave(orig_dev)) {
		struct net_device *bond_master = ACCESS_ONCE(orig_dev->master);

		if (likely(bond_master)) {
			if (__skb_bond_should_drop(skb, bond_master)) {
				skb->deliver_no_wcard = 1;
				/* deliver only exact match */
				null_or_orig = orig_dev;
			} else
				skb->dev = bond_master;
		}
	}


	__this_cpu_inc(softnet_data.processed);
	skb_reset_network_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	skb->mac_len = skb->network_header - skb->mac_header;
@@ -3201,6 +3145,10 @@ static int __netif_receive_skb(struct sk_buff *skb)


	rcu_read_lock();
	rcu_read_lock();


another_round:

	__this_cpu_inc(softnet_data.processed);

#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3209,8 +3157,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
#endif
#endif


	list_for_each_entry_rcu(ptype, &ptype_all, list) {
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		if (!ptype->dev || ptype->dev == skb->dev) {
		    ptype->dev == orig_dev) {
			if (pt_prev)
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
			pt_prev = ptype;
@@ -3224,16 +3171,20 @@ static int __netif_receive_skb(struct sk_buff *skb)
ncls:
ncls:
#endif
#endif


	/* Handle special case of bridge or macvlan */
	rx_handler = rcu_dereference(skb->dev->rx_handler);
	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
	if (rx_handler) {
		struct net_device *prev_dev;

		if (pt_prev) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
			pt_prev = NULL;
		}
		}
		prev_dev = skb->dev;
		skb = rx_handler(skb);
		skb = rx_handler(skb);
		if (!skb)
		if (!skb)
			goto out;
			goto out;
		if (skb->dev != prev_dev)
			goto another_round;
	}
	}


	if (vlan_tx_tag_present(skb)) {
	if (vlan_tx_tag_present(skb)) {
@@ -3248,24 +3199,16 @@ static int __netif_receive_skb(struct sk_buff *skb)
			goto out;
			goto out;
	}
	}


	/*
	vlan_on_bond_hook(skb);
	 * Make sure frames received on VLAN interfaces stacked on

	 * bonding interfaces still make their way to any base bonding
	/* deliver only exact match when indicated */
	 * device that may have registered for a specific ptype.  The
	null_or_dev = skb->deliver_no_wcard ? skb->dev : NULL;
	 * handler may have to adjust skb->dev and orig_dev.
	 */
	orig_or_bond = orig_dev;
	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
		orig_or_bond = vlan_dev_real_dev(skb->dev);
	}


	type = skb->protocol;
	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && (ptype->dev == null_or_orig ||
		if (ptype->type == type &&
		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev)) {
		     ptype->dev == orig_or_bond)) {
			if (pt_prev)
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
			pt_prev = ptype;