Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eae3f88e authored by David S. Miller
Browse files

net: Separate out SKB validation logic from transmit path.



dev_hard_start_xmit() does two things, it first validates and
canonicalizes the SKB, then it actually sends it.

Make a set of helper functions for doing the first part.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95f6b3dd
Loading
Loading
Loading
Loading
+71 −54
Original line number Diff line number Diff line
@@ -2644,32 +2644,35 @@ static struct sk_buff *xmit_list(struct sk_buff *first, struct net_device *dev,
	return skb;
}

/* If the device cannot hardware-offload the VLAN tag for skb's
 * vlan_proto, insert the tag into the packet payload in software.
 *
 * Returns the (possibly reallocated) skb with vlan_tci cleared so the
 * tag is not inserted a second time, or NULL if __vlan_put_tag()
 * failed (in which case it has consumed the skb).
 */
struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
{
	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		/* Tag now lives in the payload; clear the metadata copy. */
		if (skb)
			skb->vlan_tci = 0;
	}
	return skb;
}

	if (likely(!skb->next)) {
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

		/*
		 * If device doesn't need skb->dst, release it right now while
	if (skb->next)
		return skb;

	/* If device doesn't need skb->dst, release it right now while
	 * its hot in this cpu cache
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);

	features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
			skb = __vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb));
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}
		goto out_null;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
@@ -2681,8 +2684,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
	if (netif_needs_gso(skb, features)) {
		if (unlikely(dev_gso_segment(skb, features)))
			goto out_kfree_skb;
			if (skb->next)
				goto gso;
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
@@ -2705,19 +2706,35 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
		}
	}

		return xmit_one(skb, dev, txq, false);
	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}

gso:
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	int rc = NETDEV_TX_OK;

	skb = validate_xmit_skb(skb, dev);
	if (!skb)
		return rc;

	if (likely(!skb->next))
		return xmit_one(skb, dev, txq, false);

	skb->next = xmit_list(skb->next, dev, txq, &rc);
	if (likely(skb->next == NULL)) {
		skb->destructor = DEV_GSO_CB(skb)->destructor;
		consume_skb(skb);
		return rc;
	}
out_kfree_skb:

	kfree_skb(skb);
out:

	return rc;
}
EXPORT_SYMBOL_GPL(dev_hard_start_xmit);