Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bbb300eb authored by David S. Miller
Browse files

Merge branch 'bridge-vlan'



Nikolay Aleksandrov says:

====================
bridge: vlan: cleanups & fixes (part 3)

Patch 01 converts the vlgrp member to use rcu as it was already used in a
similar way so better to make it official and use all the available RCU
instrumentation. Patch 02 fixes a bug where the vlan_list can be traversed
without rtnl or rcu held which could lead to using freed entries.
Patch 03 removes some redundant code that isn't needed anymore.
Patch 04 fixes a bug reported by Ido Schimmel about the vlan_flush order
and switchdevs, it moves it back.

v2: patch 03 and 04 are new, couldn't escape the second synchronize_rcu()
since the rhtable destruction can sleep
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4b918163 f409d0ed
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -56,7 +56,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (!br_allowed_ingress(br, br_vlan_group(br), skb, &vid))
	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
		goto out;

	if (is_broadcast_ether_addr(dest))
+3 −3
Original line number Diff line number Diff line
@@ -32,7 +32,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group(p);
	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}
@@ -80,7 +80,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group(to);
	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;
@@ -112,7 +112,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
		return;
	}

	vg = nbp_vlan_group(to);
	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;
+1 −2
Original line number Diff line number Diff line
@@ -248,6 +248,7 @@ static void del_nbp(struct net_bridge_port *p)

	list_del_rcu(&p->list);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);

@@ -256,8 +257,6 @@ static void del_nbp(struct net_bridge_port *p)
	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);
	/* use the synchronize_rcu done by netdev_rx_handler_unregister */
	nbp_vlan_flush(p);

	br_multicast_del_port(p);

+2 −2
Original line number Diff line number Diff line
@@ -44,7 +44,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	vg = br_vlan_group(br);
	vg = br_vlan_group_rcu(br);
	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc modue when someone
	 * may be running packet capture.
@@ -140,7 +140,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	if (!br_allowed_ingress(p->br, nbp_vlan_group(p), skb, &vid))
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
		goto out;

	/* insert into forwarding database after filtering to avoid spoofing */
+15 −10
Original line number Diff line number Diff line
@@ -102,10 +102,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group(p);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();
@@ -253,7 +253,7 @@ static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry(v, &vg->vlan_list, vlist) {
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
@@ -303,7 +303,7 @@ static int br_fill_ifvlaninfo(struct sk_buff *skb,
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry(v, &vg->vlan_list, vlist) {
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

@@ -386,22 +386,27 @@ static int br_fill_ifinfo(struct sk_buff *skb,
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group(port);
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group(br);
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans)
		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;

		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af)
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;

		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
Loading