Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8bd63cf1 authored by Florian Westphal, committed by Pablo Neira Ayuso
Browse files

bridge: move mac header copying into br_netfilter



The mac header only has to be copied back into the skb for
fragments generated by ip_fragment(), which only happens
for bridge forwarded packets with nf-call-iptables=1 && active nf_defrag.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 1cae565e
Loading
Loading
Loading
Loading
+0 −31
Original line number Original line Diff line number Diff line
@@ -44,36 +44,6 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
		skb->protocol = htons(ETH_P_PPP_SES);
		skb->protocol = htons(ETH_P_PPP_SES);
}
}


/* Re-insert the saved MAC header (plus any PPPoE/VLAN encapsulation) in
 * front of an IP fragment generated by the IPv4 connection tracking code;
 * ip_fragment() builds only the network header, so the original link-layer
 * bytes stashed in skb->nf_bridge->data must be copied back before the
 * bridge transmits the fragment.
 *
 * Returns 0 on success or a negative errno from skb_cow_head().
 *
 * Only used in br_forward.c
 */
static inline int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	/* Restore skb->protocol for encapsulated (e.g. PPPoE) frames before
	 * computing the encap header length below. */
	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	/* Ensure writable headroom for the full link-layer header. */
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	/* Copy the saved header bytes into the headroom immediately before
	 * the current data pointer ... */
	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	/* ... but expose only the encap portion; the ETH_HLEN part is pushed
	 * later on the transmit path (see br_dev_queue_push_xmit). */
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}

/* Restore the MAC header only for packets the bridge actually forwarded
 * (or DNAT'ed while bridged); all other packets pass through untouched.
 * Returns 0 on success or the error from nf_bridge_copy_header().
 */
static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
{
	if (!skb->nf_bridge)
		return 0;
	if (!(skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
		return 0;

	return nf_bridge_copy_header(skb);
}

static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
{
	if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
	if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
@@ -119,7 +89,6 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
}
}


#else
#else
#define nf_bridge_maybe_copy_header(skb)	(0)
#define nf_bridge_pad(skb)			(0)
#define nf_bridge_pad(skb)			(0)
#define br_drop_fake_rtable(skb)	        do { } while (0)
#define br_drop_fake_rtable(skb)	        do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif /* CONFIG_BRIDGE_NETFILTER */
+1 −3
Original line number Original line Diff line number Diff line
@@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p,


int br_dev_queue_push_xmit(struct sk_buff *skb)
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
{
	/* ip_fragment doesn't copy the MAC header */
	if (!is_skb_forwardable(skb->dev, skb)) {
	if (nf_bridge_maybe_copy_header(skb) ||
	    !is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
		kfree_skb(skb);
	} else {
	} else {
		skb_push(skb, ETH_HLEN);
		skb_push(skb, ETH_HLEN);
+28 −1
Original line number Original line Diff line number Diff line
@@ -764,6 +764,33 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
}
}


#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
/* Re-insert the saved MAC header (plus any PPPoE/VLAN encapsulation) in
 * front of an IP fragment produced by ip_fragment(); ip_fragment() does
 * not copy the link-layer header, so the original bytes stashed in
 * skb->nf_bridge->data must be restored before transmit.
 *
 * Returns true on success, false if headroom could not be obtained.
 */
static bool nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	/* Restore skb->protocol for encapsulated (e.g. PPPoE) frames before
	 * computing the encap header length below. */
	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	/* Ensure writable headroom for the full link-layer header. */
	err = skb_cow_head(skb, header_size);
	if (err)
		return false;

	/* Copy the saved header bytes into the headroom immediately before
	 * the current data pointer ... */
	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	/* ... but expose only the encap portion; the ETH_HLEN part is pushed
	 * later by br_dev_queue_push_xmit(). */
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return true;
}

/* ip_fragment() output callback: put the bridged MAC header back on the
 * fragment, then hand it to the normal bridge transmit path.  A fragment
 * whose header cannot be restored is unusable and is dropped.
 */
static int br_nf_push_frag_xmit(struct sk_buff *skb)
{
	if (nf_bridge_copy_header(skb))
		return br_dev_queue_push_xmit(skb);

	kfree_skb(skb);
	return 0;
}

static int br_nf_dev_queue_xmit(struct sk_buff *skb)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
{
	int ret;
	int ret;
@@ -780,7 +807,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
			/* Drop invalid packet */
			/* Drop invalid packet */
			return NF_DROP;
			return NF_DROP;
		IPCB(skb)->frag_max_size = frag_max_size;
		IPCB(skb)->frag_max_size = frag_max_size;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
		ret = ip_fragment(skb, br_nf_push_frag_xmit);
	} else
	} else
		ret = br_dev_queue_push_xmit(skb);
		ret = br_dev_queue_push_xmit(skb);