include/linux/netfilter_bridge/ebtables.h (+2 −2)

```diff
@@ -285,8 +285,8 @@ struct ebt_table {
 	struct module *me;
 };
 
-#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \
-		     ~(__alignof__(struct ebt_replace)-1))
+#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
+		     ~(__alignof__(struct _xt_align)-1))
 extern struct ebt_table *ebt_register_table(struct net *net,
 					    const struct ebt_table *table);
 extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
```
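This hunk only swaps the type whose alignment anchors the round-up (presumably so ebtables buffers are padded the same way xtables pads with `struct _xt_align`); the macro itself is the standard align-up idiom. A minimal standalone sketch of that idiom, with illustrative names (`ALIGN_UP` is not the kernel macro):

```c
#include <stdio.h>

/* Align-up idiom used by EBT_ALIGN/XT_ALIGN: round s up to the next
 * multiple of a power-of-two alignment a.  Adding (a - 1) carries any
 * partial remainder past the boundary; ~(a - 1) masks the low bits off.
 */
#define ALIGN_UP(s, a) (((s) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	/* With an 8-byte alignment, sizes 1..8 all round up to 8. */
	for (size_t s = 1; s <= 9; s++)
		printf("ALIGN_UP(%zu, 8) = %zu\n", s, ALIGN_UP(s, (size_t)8));
	return 0;
}
```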
net/netfilter/ipvs/ip_vs_core.c (+1 −1)

```diff
@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	__be16 dport = 0;		/* destination port to forward */
 	unsigned int flags;
 	struct ip_vs_conn_param param;
+	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 	union nf_inet_addr snet;	/* source network of the client,
 					   after masking */
 
@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 	{
 		int protocol = iph.protocol;
 		const union nf_inet_addr *vaddr = &iph.daddr;
-		const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 		__be16 vport = 0;
 
 		if (dst_port == svc->port) {
```
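Hoisting `fwmark` to function scope matters because `fwmark` lives in an inner block while a pointer to it can end up stored in `param` and used after that block closes, which reads a dead object. A minimal sketch of that lifetime hazard, with hypothetical names (`struct param` here stands in for whatever holds the escaped pointer):

```c
#include <stdio.h>

struct param { const int *addr; };

int main(void)
{
	struct param p;

	{
		/* Block-scoped object: its lifetime ends at the closing brace. */
		const int fwmark = 42;
		p.addr = &fwmark;	/* pointer escapes the block... */
	}

	/* ...so this read is undefined behavior: the object is dead.
	 * Hoisting 'fwmark' to function scope, as the patch does, keeps it
	 * alive for as long as 'p' may reference it.
	 */
	printf("%d\n", *p.addr);	/* BUG: dangling pointer */
	return 0;
}
```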
net/netfilter/nf_conntrack_netlink.c (+16 −27)

```diff
@@ -1367,15 +1367,12 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
 			rcu_read_unlock();
-			spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
-				spin_lock_bh(&nf_conntrack_lock);
 				err = -EOPNOTSUPP;
 				goto err1;
 			}
 
-			spin_lock_bh(&nf_conntrack_lock);
 			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname,
 							    nf_ct_l3num(ct),
@@ -1469,7 +1466,10 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 		tstamp->start = ktime_to_ns(ktime_get_real());
 
 	add_timer(&ct->timeout);
+	spin_lock_bh(&nf_conntrack_lock);
 	nf_conntrack_hash_insert(ct);
+	nf_conntrack_get(&ct->ct_general);
+	spin_unlock_bh(&nf_conntrack_lock);
 	rcu_read_unlock();
 
 	return ct;
@@ -1490,6 +1490,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	struct nf_conntrack_tuple otuple, rtuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
 	u16 zone;
 	int err;
@@ -1512,25 +1513,22 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
-	spin_lock_bh(&nf_conntrack_lock);
 	if (cda[CTA_TUPLE_ORIG])
-		h = __nf_conntrack_find(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = __nf_conntrack_find(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, zone, &rtuple);
-	spin_unlock_bh(&nf_conntrack_lock);
 
 	if (h == NULL) {
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			struct nf_conn *ct;
 			enum ip_conntrack_events events;
 
 			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
 							&rtuple, u3);
-			if (IS_ERR(ct)) {
-				err = PTR_ERR(ct);
-				goto out_unlock;
-			}
+			if (IS_ERR(ct))
+				return PTR_ERR(ct);
 			err = 0;
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
 				events = IPCT_RELATED;
 			else
@@ -1545,23 +1543,19 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
 			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
+		}
 
 		return err;
 	}
 	/* implicit 'else' */
 
-	/* We manipulate the conntrack inside the global conntrack table lock,
-	 * so there's no need to increase the refcount */
 	err = -EEXIST;
+	ct = nf_ct_tuplehash_to_ctrack(h);
 	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
-
+		spin_lock_bh(&nf_conntrack_lock);
 		err = ctnetlink_change_conntrack(ct, cda);
+		spin_unlock_bh(&nf_conntrack_lock);
 		if (err == 0) {
-			nf_conntrack_get(&ct->ct_general);
-			spin_unlock_bh(&nf_conntrack_lock);
 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
 						      (1 << IPCT_ASSURED) |
 						      (1 << IPCT_HELPER) |
@@ -1570,15 +1564,10 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 						      (1 << IPCT_MARK),
 						      ct, NETLINK_CB(skb).pid,
 						      nlmsg_report(nlh));
-			nf_ct_put(ct);
-		} else
-			spin_unlock_bh(&nf_conntrack_lock);
-
-		return err;
+		}
 	}
 
-out_unlock:
-	spin_unlock_bh(&nf_conntrack_lock);
+	nf_ct_put(ct);
 	return err;
 }
```
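The recurring move in these hunks is replacing "hold the global `nf_conntrack_lock` across the whole operation" with "look the entry up via `nf_conntrack_find_get()`, which hands it back with a reference already taken, work on it unlocked, then drop the reference with `nf_ct_put()`". A toy userspace sketch of that take-a-reference-instead-of-holding-the-lock pattern; `entry_find_get`/`entry_put` are illustrative names, not kernel APIs:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct entry {
	atomic_int refcnt;
	int data;
};

/* Lookup that returns the object with its refcount already taken, so the
 * caller can use it without the table lock held (like nf_conntrack_find_get).
 */
static struct entry *entry_find_get(struct entry *e)
{
	if (!e)
		return NULL;
	atomic_fetch_add(&e->refcnt, 1);	/* like nf_conntrack_get() */
	return e;
}

/* Drop a reference; free on the last one (like nf_ct_put). */
static void entry_put(struct entry *e)
{
	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);
}

int main(void)
{
	struct entry *slot = malloc(sizeof(*slot));
	struct entry *e;

	atomic_init(&slot->refcnt, 1);		/* the table's own reference */
	slot->data = 0;

	e = entry_find_get(slot);		/* lookup pins the entry */
	if (e) {
		e->data++;			/* mutate without the table lock */
		entry_put(e);			/* drop the lookup reference */
	}
	entry_put(slot);			/* drop the table's reference */
	return 0;
}
```

The payoff, as in the patch, is that slow work (module loading, netlink event reporting) no longer runs under a global spinlock; the reference keeps the entry alive instead.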
net/netfilter/nf_queue.c (+32 −8)

```diff
@@ -203,6 +203,27 @@ static int __nf_queue(struct sk_buff *skb,
 	return status;
 }
 
+#ifdef CONFIG_BRIDGE_NETFILTER
+/* When called from bridge netfilter, skb->data must point to MAC header
+ * before calling skb_gso_segment(). Else, original MAC header is lost
+ * and segmented skbs will be sent to wrong destination.
+ */
+static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_push(skb, skb->network_header - skb->mac_header);
+}
+
+static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
+{
+	if (skb->nf_bridge)
+		__skb_pull(skb, skb->network_header - skb->mac_header);
+}
+#else
+#define nf_bridge_adjust_skb_data(s)		do {} while (0)
+#define nf_bridge_adjust_segmented_data(s)	do {} while (0)
+#endif
+
 int nf_queue(struct sk_buff *skb,
 	     struct list_head *elem,
 	     u_int8_t pf, unsigned int hook,
@@ -212,7 +233,7 @@ int nf_queue(struct sk_buff *skb,
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
-	int err;
+	int err = -EINVAL;
 	unsigned int queued;
 
 	if (!skb_is_gso(skb))
@@ -228,23 +249,25 @@ int nf_queue(struct sk_buff *skb,
 		break;
 	}
 
+	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
 	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
 	 * mean 'ignore this hook'.
 	 */
 	if (IS_ERR(segs))
-		return -EINVAL;
+		goto out_err;
 	queued = 0;
 	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (err == 0)
+		if (err == 0) {
+			nf_bridge_adjust_segmented_data(segs);
 			err = __nf_queue(segs, elem, pf, hook, indev,
 					 outdev, okfn, queuenum);
+		}
 		if (err == 0)
 			queued++;
 		else
@@ -252,11 +275,12 @@ int nf_queue(struct sk_buff *skb,
 		segs = nskb;
 	} while (segs);
 
-	/* also free orig skb if only some segments were queued */
-	if (unlikely(err && queued))
-		err = 0;
-	if (err == 0)
+	if (queued) {
 		kfree_skb(skb);
+		return 0;
+	}
+
+out_err:
+	nf_bridge_adjust_segmented_data(skb);
 	return err;
 }
```
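The shape of the fix: for bridged packets, push `skb->data` back to the MAC header before `skb_gso_segment()` so the Ethernet header is carried into every segment, then pull each resulting skb (and, on error paths, the original) forward again. A userspace toy model of that push/pull bookkeeping; `struct toy_skb` mimics the relevant `sk_buff` fields but is not the kernel implementation:

```c
#include <stdio.h>

struct toy_skb {
	unsigned int mac_header;	/* offset of MAC header in buffer */
	unsigned int network_header;	/* offset of network header */
	unsigned int data;		/* current start of valid data */
	int is_bridged;			/* stands in for skb->nf_bridge */
};

/* Like nf_bridge_adjust_skb_data(): expose the MAC header before
 * segmentation by pushing data back (cf. __skb_push in the patch). */
static void adjust_skb_data(struct toy_skb *skb)
{
	if (skb->is_bridged)
		skb->data -= skb->network_header - skb->mac_header;
}

/* Like nf_bridge_adjust_segmented_data(): restore data to the network
 * header afterwards (cf. __skb_pull in the patch). */
static void adjust_segmented_data(struct toy_skb *skb)
{
	if (skb->is_bridged)
		skb->data += skb->network_header - skb->mac_header;
}

int main(void)
{
	struct toy_skb skb = {
		.mac_header = 0,
		.network_header = 14,	/* 14-byte Ethernet header */
		.data = 14,
		.is_bridged = 1,
	};

	adjust_skb_data(&skb);		/* data -> 0: MAC header visible */
	printf("before segmentation, data offset = %u\n", skb.data);
	/* ... segment here; each segment now inherits the MAC header ... */
	adjust_segmented_data(&skb);	/* data -> 14: back to network header */
	printf("after segmentation, data offset = %u\n", skb.data);
	return 0;
}
```

Note how the rewritten tail of `nf_queue()` pairs with this: every exit path that did not hand the skb off runs `nf_bridge_adjust_segmented_data()` (the `out_err:` label) so the skb is never left with its data pointer still at the MAC header.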