include/linux/udp.h (+1 −1)

@@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
 #define UDP_HTABLE_SIZE_MIN	(CONFIG_BASE_SMALL ? 128 : 256)
 
-static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
 {
 	return (num + net_hash_mix(net)) & mask;
 }

include/net/inet6_hashtables.h (+0 −2)

@@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
 	return jhash_3words(lhash, fhash, ports, initval);
 }
 
-int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
-
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM

include/net/inet_hashtables.h (+29 −50)

@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/wait.h>
-#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_sock.h>

@@ -168,53 +167,13 @@ static inline spinlock_t *inet_ehash_lockp(
 	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
 }
 
-static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
-{
-	unsigned int i, size = 256;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-	if (nr_pcpus >= 4)
-		size = 512;
-	if (nr_pcpus >= 8)
-		size = 1024;
-	if (nr_pcpus >= 16)
-		size = 2048;
-	if (nr_pcpus >= 32)
-		size = 4096;
-	if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
-		if (size * sizeof(spinlock_t) > PAGE_SIZE)
-			hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
-		else
-#endif
-		hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
-						GFP_KERNEL);
-		if (!hashinfo->ehash_locks)
-			return ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&hashinfo->ehash_locks[i]);
-	}
-	hashinfo->ehash_locks_mask = size - 1;
-	return 0;
-}
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
 
 static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
 {
-	if (hashinfo->ehash_locks) {
-#ifdef CONFIG_NUMA
-		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
-							sizeof(spinlock_t);
-		if (size > PAGE_SIZE)
-			vfree(hashinfo->ehash_locks);
-		else
-#endif
-		kfree(hashinfo->ehash_locks);
-		hashinfo->ehash_locks = NULL;
-	}
+	kvfree(hashinfo->ehash_locks);
+	hashinfo->ehash_locks = NULL;
 }
 
 struct inet_bind_bucket *
 inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
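Note: inet_ehash_locks_alloc() is now only declared in this header, which implies its body moves out of line (presumably into net/ipv4/inet_hashtables.c) and lets inet_ehash_locks_free() collapse to a single kvfree(). Below is a hedged sketch of what the out-of-line allocator could look like, not the committed body: it keeps the old CPU-count sizing, pairs kmalloc_array() with a vmalloc() fallback so kvfree() works for either, and returns a proper -ENOMEM (the old inline body returned a positive ENOMEM).

/* Sketch only, under the assumptions above; not the committed body. */
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/inet_hashtables.h>

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, size = 256;
	unsigned int nr_pcpus = num_possible_cpus();

	if (nr_pcpus >= 4)
		size = 512;
	if (nr_pcpus >= 8)
		size = 1024;
	if (nr_pcpus >= 16)
		size = 2048;
	if (nr_pcpus >= 32)
		size = 4096;

	/* kmalloc first, vmalloc as fallback: both are freed by kvfree() */
	hashinfo->ehash_locks = kmalloc_array(size, sizeof(spinlock_t),
					      GFP_KERNEL | __GFP_NOWARN);
	if (!hashinfo->ehash_locks)
		hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
	if (!hashinfo->ehash_locks)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		spin_lock_init(&hashinfo->ehash_locks[i]);
	hashinfo->ehash_locks_mask = size - 1;
	return 0;
}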
@@ -223,8 +182,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
 void inet_bind_bucket_destroy(struct kmem_cache *cachep,
 			      struct inet_bind_bucket *tb);
 
-static inline int inet_bhashfn(struct net *net, const __u16 lport,
-			       const int bhash_size)
+static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
+			       const u32 bhash_size)
 {
 	return (lport + net_hash_mix(net)) & (bhash_size - 1);
 }

@@ -233,7 +192,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 		    const unsigned short snum);
 
 /* These can have wildcards, don't try too hard. */
-static inline int inet_lhashfn(struct net *net, const unsigned short num)
+static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
 {
 	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
 }

@@ -251,6 +210,7 @@ void inet_put_port(struct sock *sk);
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
 int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);

@@ -385,13 +345,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
 				     iph->daddr, dport, inet_iif(skb));
 }
 
+u32 sk_ehashfn(const struct sock *sk);
+u32 inet6_ehashfn(const struct net *net,
+		  const struct in6_addr *laddr, const u16 lport,
+		  const struct in6_addr *faddr, const __be16 fport);
+
+static inline void sk_daddr_set(struct sock *sk, __be32 addr)
+{
+	sk->sk_daddr = addr; /* alias of inet_daddr */
+#if IS_ENABLED(CONFIG_IPV6)
+	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
+#endif
+}
+
+static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+{
+	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
+#if IS_ENABLED(CONFIG_IPV6)
+	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
+#endif
+}
+
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 			struct sock *sk, u32 port_offset,
 			int (*check_established)(struct inet_timewait_death_row *,
 						 struct sock *, __u16,
-						 struct inet_timewait_sock **),
-			int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+						 struct inet_timewait_sock **));
 
 int inet_hash_connect(struct inet_timewait_death_row *death_row,
 		      struct sock *sk);
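Note: the new sk_daddr_set()/sk_rcv_saddr_set() helpers keep the IPv6 copies of the IPv4 addresses in sync as v4-mapped addresses, so dual-stack lookup and hashing paths can compare a single set of fields. A small, runnable user-space illustration of the mapping that ipv6_addr_set_v4mapped() performs (this shows the standard ::ffff:a.b.c.d byte layout, not kernel code):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);

	memset(&v6, 0, sizeof(v6));		/* bytes 0..9 stay zero */
	v6.s6_addr[10] = 0xff;			/* bytes 10..11 are 0xffff */
	v6.s6_addr[11] = 0xff;
	memcpy(&v6.s6_addr[12], &v4, 4);	/* bytes 12..15 carry the v4 addr */

	inet_ntop(AF_INET6, &v6, buf, sizeof(buf));
	printf("%s\n", buf);			/* prints ::ffff:192.0.2.1 */
	return 0;
}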
include/net/inet_timewait_sock.h (+9 −98)

@@ -31,67 +31,14 @@
 
 struct inet_hashinfo;
 
-#define INET_TWDR_RECYCLE_SLOTS_LOG	5
-#define INET_TWDR_RECYCLE_SLOTS		(1 << INET_TWDR_RECYCLE_SLOTS_LOG)
-
-/*
- * If time > 4sec, it is "slow" path, no recycling is required,
- * so that we select tick to get range about 4 seconds.
- */
-#if HZ <= 16 || HZ > 4096
-# error Unsupported: HZ <= 16 or HZ > 4096
-#elif HZ <= 32
-# define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 64
-# define INET_TWDR_RECYCLE_TICK (6 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 128
-# define INET_TWDR_RECYCLE_TICK (7 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 256
-# define INET_TWDR_RECYCLE_TICK (8 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 512
-# define INET_TWDR_RECYCLE_TICK (9 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 1024
-# define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#elif HZ <= 2048
-# define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#else
-# define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
-#endif
-
-static inline u32 inet_tw_time_stamp(void)
-{
-	return jiffies;
-}
-
-/* TIME_WAIT reaping mechanism. */
-#define INET_TWDR_TWKILL_SLOTS	8 /* Please keep this a power of 2. */
-
-#define INET_TWDR_TWKILL_QUOTA 100
-
 struct inet_timewait_death_row {
-	/* Short-time timewait calendar */
-	int			twcal_hand;
-	unsigned long		twcal_jiffie;
-	struct timer_list	twcal_timer;
-	struct hlist_head	twcal_row[INET_TWDR_RECYCLE_SLOTS];
-
-	spinlock_t		death_lock;
-	int			tw_count;
-	int			period;
-	u32			thread_slots;
-	struct work_struct	twkill_work;
-	struct timer_list	tw_timer;
-	int			slot;
-	struct hlist_head	cells[INET_TWDR_TWKILL_SLOTS];
-	struct inet_hashinfo	*hashinfo;
+	atomic_t		tw_count;
+
+	struct inet_hashinfo	*hashinfo ____cacheline_aligned_in_smp;
 	int			sysctl_tw_recycle;
 	int			sysctl_max_tw_buckets;
 };
 
-void inet_twdr_hangman(unsigned long data);
-void inet_twdr_twkill_work(struct work_struct *work);
-void inet_twdr_twcal_tick(unsigned long data);
-
 struct inet_bind_bucket;
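Note: with the recycle calendar, kill slots, and twkill work queue gone, the death row shrinks to an atomic counter plus its sysctls, and (as the next hunk shows) each timewait socket instead carries its own tw_timer and a tw_dr back pointer. A hedged sketch of the resulting arm/expire flow, assuming the setup_timer()/mod_timer() API of this kernel generation; tw_timer_handler() is a hypothetical name and inet_twsk_kill() an assumed unhash-and-release helper, so this is illustrative, not the committed code:

static void tw_timer_handler(unsigned long data)
{
	struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;

	atomic_dec(&tw->tw_dr->tw_count);	/* one fewer pending timewait */
	inet_twsk_kill(tw);			/* unhash from ehash/bhash, free */
}

static void twsk_schedule_sketch(struct inet_timewait_sock *tw, int timeo)
{
	/* each tw owns its timer: no shared slots, no global death_lock */
	tw->tw_kill = timeo <= 4 * HZ;		/* short timeout: "killed" stat */
	setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
	atomic_inc(&tw->tw_dr->tw_count);
	mod_timer(&tw->tw_timer, jiffies + timeo);
}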
@@ -132,52 +79,18 @@ struct inet_timewait_sock {
 	__be16			tw_sport;
 	kmemcheck_bitfield_begin(flags);
 	/* And these are ours. */
-	unsigned int		tw_pad0		: 1,	/* 1 bit hole */
+	unsigned int		tw_kill		: 1,
 				tw_transparent	: 1,
 				tw_flowlabel	: 20,
 				tw_pad		: 2,	/* 2 bits hole */
 				tw_tos		: 8;
 	kmemcheck_bitfield_end(flags);
-	u32			tw_ttd;
+	struct timer_list	tw_timer;
 	struct inet_bind_bucket	*tw_tb;
-	struct hlist_node	tw_death_node;
+	struct inet_timewait_death_row *tw_dr;
 };
 #define tw_tclass tw_tos
 
-static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
-{
-	return !hlist_unhashed(&tw->tw_death_node);
-}
-
-static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
-{
-	tw->tw_death_node.pprev = NULL;
-}
-
-static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
-	__hlist_del(&tw->tw_death_node);
-	inet_twsk_dead_node_init(tw);
-}
-
-static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
-{
-	if (inet_twsk_dead_hashed(tw)) {
-		__inet_twsk_del_dead_node(tw);
-		return 1;
-	}
-	return 0;
-}
-
-#define inet_twsk_for_each(tw, node, head) \
-	hlist_nulls_for_each_entry(tw, node, head, tw_node)
-
-#define inet_twsk_for_each_inmate(tw, jail) \
-	hlist_for_each_entry(tw, jail, tw_death_node)
-
-#define inet_twsk_for_each_inmate_safe(tw, safe, jail) \
-	hlist_for_each_entry_safe(tw, safe, jail, tw_death_node)
-
 static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
 {
 	return (struct inet_timewait_sock *)sk;

@@ -192,16 +105,14 @@ int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
 			  struct inet_hashinfo *hashinfo);
 
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+					   struct inet_timewait_death_row *dr,
 					   const int state);
 
 void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			   struct inet_hashinfo *hashinfo);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw,
-			struct inet_timewait_death_row *twdr,
-			const int timeo, const int timewait_len);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw,
-			  struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family);

include/net/netns/hash.h (+2 −2)

@@ -5,7 +5,7 @@
 
 struct net;
 
-static inline unsigned int net_hash_mix(struct net *net)
+static inline u32 net_hash_mix(const struct net *net)
 {
 #ifdef CONFIG_NET_NS
 	/*
@@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net)
 	 * always zeroed
 	 */
-	return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT);
+	return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
 #else
 	return 0;
 #endif
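Note: net_hash_mix() now returns an explicit u32 and takes a const pointer, matching the u32 hash functions above. A quick, runnable user-space illustration of the idea, assuming 64-byte cache lines (L1_CACHE_SHIFT == 6); the always-zero low bits of the struct net address are shifted out, the result is truncated to 32 bits, then mixed into hashes such as udp_hashfn():

#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_SHIFT 6	/* assumption: 64-byte cache lines */

/* Mimics the new net_hash_mix(): shift out the alignment bits that
 * carry no entropy, truncate to 32 bits.
 */
static uint32_t net_hash_mix_demo(const void *net)
{
	return (uint32_t)(((unsigned long)net) >> L1_CACHE_SHIFT);
}

/* Same shape as udp_hashfn(): per-namespace offset, then mask. */
static uint32_t udp_hashfn_demo(const void *net, uint32_t num, uint32_t mask)
{
	return (num + net_hash_mix_demo(net)) & mask;
}

int main(void)
{
	static long fake_net[16];	/* stand-in for a struct net */

	printf("bucket = %u\n", udp_hashfn_demo(fake_net, 53, 255));
	return 0;
}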