
Commit 6e04e021 authored by Arnaldo Carvalho de Melo, committed by David S. Miller

[INET]: Move tcp_port_rover to inet_hashinfo



Also expose all of the tcp_hashinfo members directly, i.e. kill the
tcp_ehash, etc. macros. This more clearly exposes already generic
functions and some that need just a bit of work to become generic, as
we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2d8c4ce5
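The net effect at call sites is that code which previously went through the tcp_ehash/tcp_bhash/tcp_bucket_cachep macros (and the global tcp_port_rover) now dereferences the shared tcp_hashinfo instance directly, as the hunks below show. A minimal sketch of the resulting access pattern follows; the helper function itself is illustrative and not part of the patch:

#include <net/inet_hashtables.h>
#include <net/tcp.h>

/* Illustrative only: walk the established hash the way the tcp_diag
 * hunk below does after this patch, i.e. through tcp_hashinfo members
 * rather than the removed tcp_ehash/tcp_ehash_size macros. */
static void walk_established_buckets(void)
{
	int i;

	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];

		read_lock(&head->lock);
		/* ... inspect sockets on head->chain ... */
		read_unlock(&head->lock);
	}
}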
+1 −0
@@ -117,6 +117,7 @@ struct inet_hashinfo {
	wait_queue_head_t		lhash_wait;
	spinlock_t			portalloc_lock;
	kmem_cache_t			*bind_bucket_cachep;
	int				port_rover;
};

static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
+1 −1
@@ -136,7 +136,7 @@ struct sock_common {
  *	@sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_hashent: hash entry in several tables (e.g. tcp_ehash)
  *	@sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
+7 −19
@@ -42,18 +42,6 @@
#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;
#define tcp_ehash		(tcp_hashinfo.ehash)
#define tcp_bhash		(tcp_hashinfo.bhash)
#define tcp_ehash_size		(tcp_hashinfo.ehash_size)
#define tcp_bhash_size		(tcp_hashinfo.bhash_size)
#define tcp_listening_hash	(tcp_hashinfo.listening_hash)
#define tcp_lhash_lock		(tcp_hashinfo.lhash_lock)
#define tcp_lhash_users		(tcp_hashinfo.lhash_users)
#define tcp_lhash_wait		(tcp_hashinfo.lhash_wait)
#define tcp_portalloc_lock	(tcp_hashinfo.portalloc_lock)
#define tcp_bucket_cachep	(tcp_hashinfo.bind_bucket_cachep)

extern int tcp_port_rover;

#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
@@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */

static inline void tcp_listen_lock(void)
{
	/* read_lock synchronizes to candidates to writers */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
	read_lock(&tcp_hashinfo.lhash_lock);
	atomic_inc(&tcp_hashinfo.lhash_users);
	read_unlock(&tcp_hashinfo.lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
	if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
		wake_up(&tcp_hashinfo.lhash_wait);
}

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
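The comment and lock pair retained above describe the reader side of the listening-hash locking scheme: readers bump lhash_users under a brief read_lock so that tcp_listen_wlock() can wait for them to drain. A hypothetical reader using the now-exposed tcp_hashinfo fields might look like the sketch below; the function and the INET_LHTABLE_SIZE bucket count are assumptions, not part of this patch.

#include <net/inet_hashtables.h>
#include <net/sock.h>
#include <net/tcp.h>

/* Hypothetical reader: count listening sockets while writers are held
 * off by the tcp_listen_lock()/tcp_listen_unlock() pair shown above.
 * INET_LHTABLE_SIZE is assumed to be the listening-hash bucket count
 * declared in inet_hashtables.h. */
static int count_listening_sockets(void)
{
	struct hlist_node *node;
	struct sock *sk;
	int i, count = 0;

	tcp_listen_lock();
	for (i = 0; i < INET_LHTABLE_SIZE; i++)
		sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i])
			count++;
	tcp_listen_unlock();

	return count;
}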
+21 −21
@@ -2257,11 +2257,11 @@ void __init tcp_init(void)
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct inet_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!tcp_bucket_cachep)
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!tcp_hashinfo.bind_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
@@ -2276,7 +2276,7 @@ void __init tcp_init(void)
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_ehash =
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
@@ -2284,37 +2284,37 @@ void __init tcp_init(void)
						(25 - PAGE_SHIFT) :
						(27 - PAGE_SHIFT),
					HASH_HIGHMEM,
					&tcp_ehash_size,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
	for (i = 0; i < (tcp_ehash_size << 1); i++) {
		rwlock_init(&tcp_ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_bhash =
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_ehash_size,
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
						(25 - PAGE_SHIFT) :
						(27 - PAGE_SHIFT),
					HASH_HIGHMEM,
					&tcp_bhash_size,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_bhash_size = 1 << tcp_bhash_size;
	for (i = 0; i < tcp_bhash_size; i++) {
		spin_lock_init(&tcp_bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
@@ -2329,7 +2329,7 @@ void __init tcp_init(void)
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	tcp_port_rover = sysctl_local_port_range[0] - 1;
	tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] =  768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
@@ -2344,7 +2344,7 @@ void __init tcp_init(void)

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_ehash_size << 1, tcp_bhash_size);
	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}
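Worth noting in the hunk above: the established table is allocated with twice ehash_size buckets and the init loop covers both halves, while the tcp_diag hunk below indexes the second half at an ehash_size offset when dumping TIME_WAIT sockets. A minimal sketch of that indexing, with a hypothetical helper name:

#include <net/inet_hashtables.h>
#include <net/tcp.h>

/* Hypothetical helper: established sockets hash into buckets
 * [0, ehash_size); the matching TIME_WAIT chain lives ehash_size
 * buckets further into the same allocation. */
static struct inet_ehash_bucket *tw_bucket_of(unsigned int hash)
{
	return &tcp_hashinfo.ehash[hash + tcp_hashinfo.ehash_size];
}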
+4 −4
@@ -595,7 +595,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
			struct hlist_node *node;

			num = 0;
			sk_for_each(sk, node, &tcp_listening_hash[i]) {
			sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
				struct inet_sock *inet = inet_sk(sk);

				if (num < s_num) {
@@ -645,8 +645,8 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
	if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
		return skb->len;

	for (i = s_i; i < tcp_ehash_size; i++) {
		struct inet_ehash_bucket *head = &tcp_ehash[i];
	for (i = s_i; i < tcp_hashinfo.ehash_size; i++) {
		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];
		struct sock *sk;
		struct hlist_node *node;

@@ -678,7 +678,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)

		if (r->tcpdiag_states&TCPF_TIME_WAIT) {
			sk_for_each(sk, node,
				    &tcp_ehash[i + tcp_ehash_size].chain) {
				    &tcp_hashinfo.ehash[i + tcp_hashinfo.ehash_size].chain) {
				struct inet_sock *inet = inet_sk(sk);

				if (num < s_num)