include/net/udp.h (+1 −1)

@@ -247,7 +247,7 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
 /* net/ipv4/udp.c */
 void udp_v4_early_demux(struct sk_buff *skb);
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
 		 int (*saddr_cmp)(const struct sock *,
 				  const struct sock *));
net/ipv4/udp.c (+3 −1)

@@ -1627,14 +1627,16 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
 	struct dst_entry *old;
 
 	if (dst_hold_safe(dst)) {
 		old = xchg(&sk->sk_rx_dst, dst);
 		dst_release(old);
+		return old != dst;
 	}
+	return false;
 }
 EXPORT_SYMBOL(udp_sk_rx_dst_set);
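For context, a minimal sketch of how the IPv4 receive fast path is assumed to call this helper (modeled on __udp4_lib_rcv; this caller is not part of the diff). It ignores the new return value, since only IPv6 needs to refresh a per-route cookie when the cached dst actually changes:

	/* Assumed IPv4 caller pattern (not in this diff): update the cached
	 * rx dst only when it changed; the bool result is irrelevant here
	 * because IPv4 keeps no route validation cookie. */
	if (unlikely(sk->sk_rx_dst != dst))
		udp_sk_rx_dst_set(sk, dst);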
net/ipv6/udp.c (+10 −1)

@@ -754,6 +754,15 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	return 0;
 }
 
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	if (udp_sk_rx_dst_set(sk, dst)) {
+		const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+	}
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		   int proto)
 {

@@ -803,7 +812,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		int ret;
 
 		if (unlikely(sk->sk_rx_dst != dst))
-			udp_sk_rx_dst_set(sk, dst);
+			udp6_sk_rx_dst_set(sk, dst);
 
 		ret = udpv6_queue_rcv_skb(sk, skb);
 		sock_put(sk);
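For completeness, a sketch of where the stored rx_dst_cookie is expected to pay off: revalidating the cached dst on a later receive, modeled on the udp_v6_early_demux pattern. This is shown as an assumption for illustration and is not part of the patch:

	/* Sketch (assumption, not in this diff): a cached IPv6 rx dst is only
	 * reused if dst_check() accepts it together with the cookie saved by
	 * udp6_sk_rx_dst_set(); otherwise the packet falls back to a regular
	 * route lookup later in the receive path. */
	struct dst_entry *dst = sk->sk_rx_dst;

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst)
		skb_dst_set_noref(skb, dst);

Without a valid cookie the check fails after routing changes, which is exactly the stale-dst case this patch addresses by refreshing the cookie whenever udp_sk_rx_dst_set() reports that the cached dst was replaced.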