Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8a83a00b authored by Arnd Bergmann, committed by David S. Miller
Browse files

net: maintain namespace isolation between vlan and real device



In the vlan and macvlan drivers, the start_xmit function forwards
data to the dev_queue_xmit function for another device, which may
potentially belong to a different namespace.

To make sure that classification stays within a single namespace,
this resets the potentially critical fields.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6884b348
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -269,7 +269,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
	}

xmit_world:
	skb->dev = vlan->lowerdev;
	skb_set_dev(skb, vlan->lowerdev);
	return dev_queue_xmit(skb);
}

+9 −0
Original line number Diff line number Diff line
@@ -1004,6 +1004,15 @@ static inline bool netdev_uses_dsa_tags(struct net_device *dev)
	return 0;
}

#ifndef CONFIG_NET_NS
/*
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * Without network namespaces there is no namespace-private state to
 * scrub, but the cached dst must still be dropped: the CONFIG_NET_NS
 * implementation in net/core/dev.c does skb_dst_drop() unconditionally,
 * and dev_forward_skb() now relies on skb_set_dev() to drop the dst it
 * previously dropped explicitly. Keep both configurations equivalent.
 */
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif

static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
+1 −1
Original line number Diff line number Diff line
@@ -322,7 +322,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
	}


	skb->dev = vlan_dev_info(dev)->real_dev;
	skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);

+31 −4
Original line number Diff line number Diff line
@@ -1448,13 +1448,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
	if (skb->len > (dev->mtu + dev->hard_header_len))
		return NET_RX_DROP;

	skb_dst_drop(skb);
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1611,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
	return false;
}

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	/* The cached route is never valid for the new device. */
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		/* Crossing namespaces: scrub everything whose meaning is
		 * local to the old namespace — security path, netfilter
		 * conntrack state, secmark, socket mark, priority and the
		 * traffic-control classification index — so the new
		 * namespace cannot observe or act on the old one's state.
		 */
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.