Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 698365fa authored by WANG Cong, committed by David S. Miller
Browse files

net: clean up snmp stats code



commit 8f0ea0fe (snmp: reduce percpu needs by 50%)
reduced snmp array size to 1, so technically it doesn't have to be
an array any more. What's more, after the following commit:

	commit 933393f5
	Date:   Thu Dec 22 11:58:51 2011 -0600

	    percpu: Remove irqsafe_cpu_xxx variants

	    We simply say that regular this_cpu use must be safe regardless of
	    preemption and interrupt state.  That has no material change for x86
	    and s390 implementations of this_cpu operations.  However, arches that
	    do not provide their own implementation for this_cpu operations will
	    now get code generated that disables interrupts instead of preemption.

probably no arch wants to have SNMP_ARRAY_SZ == 2. At least after
almost 3 years, no one complains.

So, just convert the array to a single pointer and remove snmp_mib_init()
and snmp_mib_free() as well.

Cc: Christoph Lameter <cl@linux.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d1f88a66
Loading
Loading
Loading
Loading
+3 −15
Original line number Diff line number Diff line
@@ -196,27 +196,15 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)

unsigned long snmp_fold_field(void __percpu *mib[], int offt);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif
int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);

static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
{
	int i;

	BUG_ON(ptr == NULL);
	for (i = 0; i < SNMP_ARRAY_SZ; i++) {
		free_percpu(ptr[i]);
		ptr[i] = NULL;
	}
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

+15 −17
Original line number Diff line number Diff line
@@ -116,51 +116,49 @@ struct linux_xfrm_mib {
	unsigned long	mibs[LINUX_MIB_XFRMMAX];
};

#define SNMP_ARRAY_SZ 1

#define DEFINE_SNMP_STAT(type, name)	\
	__typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
	__typeof__(type) __percpu *name
#define DEFINE_SNMP_STAT_ATOMIC(type, name)	\
	__typeof__(type) *name
#define DECLARE_SNMP_STAT(type, name)	\
	extern __typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
	extern __typeof__(type) __percpu *name

#define SNMP_INC_STATS_BH(mib, field)	\
			__this_cpu_inc(mib[0]->mibs[field])
			__this_cpu_inc(mib->mibs[field])

#define SNMP_INC_STATS_USER(mib, field)	\
			this_cpu_inc(mib[0]->mibs[field])
			this_cpu_inc(mib->mibs[field])

#define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
			atomic_long_inc(&mib->mibs[field])

#define SNMP_INC_STATS(mib, field)	\
			this_cpu_inc(mib[0]->mibs[field])
			this_cpu_inc(mib->mibs[field])

#define SNMP_DEC_STATS(mib, field)	\
			this_cpu_dec(mib[0]->mibs[field])
			this_cpu_dec(mib->mibs[field])

#define SNMP_ADD_STATS_BH(mib, field, addend)	\
			__this_cpu_add(mib[0]->mibs[field], addend)
			__this_cpu_add(mib->mibs[field], addend)

#define SNMP_ADD_STATS_USER(mib, field, addend)	\
			this_cpu_add(mib[0]->mibs[field], addend)
			this_cpu_add(mib->mibs[field], addend)

#define SNMP_ADD_STATS(mib, field, addend)	\
			this_cpu_add(mib[0]->mibs[field], addend)
			this_cpu_add(mib->mibs[field], addend)
/*
 * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
 * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
 * to make @ptr a non-percpu pointer.
 */
#define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
	do { \
		__typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;	\
		__typeof__(*mib->mibs) *ptr = mib->mibs;	\
		this_cpu_inc(ptr[basefield##PKTS]);		\
		this_cpu_add(ptr[basefield##OCTETS], addend);	\
	} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
	do { \
		__typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;	\
		__typeof__(*mib->mibs) *ptr = mib->mibs;	\
		__this_cpu_inc(ptr[basefield##PKTS]);		\
		__this_cpu_add(ptr[basefield##OCTETS], addend);	\
	} while (0)
@@ -170,7 +168,7 @@ struct linux_xfrm_mib {

#define SNMP_ADD_STATS64_BH(mib, field, addend) 			\
	do {								\
		__typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]);	\
		__typeof__(*mib) *ptr = __this_cpu_ptr(mib);		\
		u64_stats_update_begin(&ptr->syncp);			\
		ptr->mibs[field] += addend;				\
		u64_stats_update_end(&ptr->syncp);			\
@@ -191,8 +189,8 @@ struct linux_xfrm_mib {
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)			\
	do {								\
		__typeof__(*mib[0]) *ptr;				\
		ptr = __this_cpu_ptr((mib)[0]);				\
		__typeof__(*mib) *ptr;					\
		ptr = __this_cpu_ptr(mib);				\
		u64_stats_update_begin(&ptr->syncp);			\
		ptr->mibs[basefield##PKTS]++;				\
		ptr->mibs[basefield##OCTETS] += addend;			\
+5 −4
Original line number Diff line number Diff line
@@ -1084,14 +1084,15 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void __percpu **)dccp_statistics,
			     sizeof(struct dccp_mib),
			     __alignof__(struct dccp_mib));
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void __percpu **)dccp_statistics);
	free_percpu(dccp_statistics);
}

static int thash_entries;
+31 −62
Original line number Diff line number Diff line
@@ -1476,22 +1476,20 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);

unsigned long snmp_fold_field(void __percpu *mib[], int offt)
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i, j;
	int i;

	for_each_possible_cpu(i) {
		for (j = 0; j < SNMP_ARRAY_SZ; j++)
			res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
	}
	for_each_possible_cpu(i)
		res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);

#if BITS_PER_LONG==32

u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
	u64 res = 0;
	int cpu;
@@ -1502,7 +1500,7 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
		u64 v;
		unsigned int start;

		bhptr = per_cpu_ptr(mib[0], cpu);
		bhptr = per_cpu_ptr(mib, cpu);
		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
		do {
			start = u64_stats_fetch_begin_irq(syncp);
@@ -1516,25 +1514,6 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif

int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
{
	BUG_ON(ptr == NULL);
	ptr[0] = __alloc_percpu(mibsize, align);
	if (!ptr[0])
		return -ENOMEM;

#if SNMP_ARRAY_SZ == 2
	ptr[1] = __alloc_percpu(mibsize, align);
	if (!ptr[1]) {
		free_percpu(ptr[0]);
		ptr[0] = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(snmp_mib_init);

#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
	.handler =	igmp_rcv,
@@ -1570,40 +1549,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
{
	int i;

	if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
			  sizeof(struct tcp_mib),
			  __alignof__(struct tcp_mib)) < 0)
	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
	if (!net->mib.tcp_statistics)
		goto err_tcp_mib;
	if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
			  sizeof(struct ipstats_mib),
			  __alignof__(struct ipstats_mib)) < 0)
	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
	if (!net->mib.ip_statistics)
		goto err_ip_mib;

	for_each_possible_cpu(i) {
		struct ipstats_mib *af_inet_stats;
		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
		u64_stats_init(&af_inet_stats->syncp);
#if SNMP_ARRAY_SZ == 2
		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
		u64_stats_init(&af_inet_stats->syncp);
#endif
	}

	if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
			  sizeof(struct linux_mib),
			  __alignof__(struct linux_mib)) < 0)
	net->mib.net_statistics = alloc_percpu(struct linux_mib);
	if (!net->mib.net_statistics)
		goto err_net_mib;
	if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
			  sizeof(struct udp_mib),
			  __alignof__(struct udp_mib)) < 0)
	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udp_statistics)
		goto err_udp_mib;
	if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
			  sizeof(struct udp_mib),
			  __alignof__(struct udp_mib)) < 0)
	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udplite_statistics)
		goto err_udplite_mib;
	if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
			  sizeof(struct icmp_mib),
			  __alignof__(struct icmp_mib)) < 0)
	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
	if (!net->mib.icmp_statistics)
		goto err_icmp_mib;
	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
					      GFP_KERNEL);
@@ -1614,17 +1583,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
	return 0;

err_icmpmsg_mib:
	snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
	free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
	snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
	free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
	snmp_mib_free((void __percpu **)net->mib.udp_statistics);
	free_percpu(net->mib.udp_statistics);
err_udp_mib:
	snmp_mib_free((void __percpu **)net->mib.net_statistics);
	free_percpu(net->mib.net_statistics);
err_net_mib:
	snmp_mib_free((void __percpu **)net->mib.ip_statistics);
	free_percpu(net->mib.ip_statistics);
err_ip_mib:
	snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
	free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
	return -ENOMEM;
}
@@ -1632,12 +1601,12 @@ static __net_init int ipv4_mib_init_net(struct net *net)
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);
	snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
	snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
	snmp_mib_free((void __percpu **)net->mib.udp_statistics);
	snmp_mib_free((void __percpu **)net->mib.net_statistics);
	snmp_mib_free((void __percpu **)net->mib.ip_statistics);
	snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
	free_percpu(net->mib.icmp_statistics);
	free_percpu(net->mib.udplite_statistics);
	free_percpu(net->mib.udp_statistics);
	free_percpu(net->mib.net_statistics);
	free_percpu(net->mib.ip_statistics);
	free_percpu(net->mib.tcp_statistics);
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
+12 −12
Original line number Diff line number Diff line
@@ -345,15 +345,15 @@ static void icmp_put(struct seq_file *seq)
	for (i = 0; icmpmibmap[i].name != NULL; i++)
		seq_printf(seq, " Out%s", icmpmibmap[i].name);
	seq_printf(seq, "\nIcmp: %lu %lu %lu",
		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
	for (i = 0; icmpmibmap[i].name != NULL; i++)
		seq_printf(seq, " %lu",
			   atomic_long_read(ptr + icmpmibmap[i].index));
	seq_printf(seq, " %lu %lu",
		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
		snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
		snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
	for (i = 0; icmpmibmap[i].name != NULL; i++)
		seq_printf(seq, " %lu",
			   atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
@@ -379,7 +379,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
	BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
	for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
		seq_printf(seq, " %llu",
			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
			   snmp_fold_field64(net->mib.ip_statistics,
					     snmp4_ipstats_list[i].entry,
					     offsetof(struct ipstats_mib, syncp)));

@@ -395,11 +395,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
		/* MaxConn field is signed, RFC 2012 */
		if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
			seq_printf(seq, " %ld",
				   snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
				   snmp_fold_field(net->mib.tcp_statistics,
						   snmp4_tcp_list[i].entry));
		else
			seq_printf(seq, " %lu",
				   snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
				   snmp_fold_field(net->mib.tcp_statistics,
						   snmp4_tcp_list[i].entry));
	}

@@ -410,7 +410,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
	seq_puts(seq, "\nUdp:");
	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
		seq_printf(seq, " %lu",
			   snmp_fold_field((void __percpu **)net->mib.udp_statistics,
			   snmp_fold_field(net->mib.udp_statistics,
					   snmp4_udp_list[i].entry));

	/* the UDP and UDP-Lite MIBs are the same */
@@ -421,7 +421,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
	seq_puts(seq, "\nUdpLite:");
	for (i = 0; snmp4_udp_list[i].name != NULL; i++)
		seq_printf(seq, " %lu",
			   snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
			   snmp_fold_field(net->mib.udplite_statistics,
					   snmp4_udp_list[i].entry));

	seq_putc(seq, '\n');
@@ -458,7 +458,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
	seq_puts(seq, "\nTcpExt:");
	for (i = 0; snmp4_net_list[i].name != NULL; i++)
		seq_printf(seq, " %lu",
			   snmp_fold_field((void __percpu **)net->mib.net_statistics,
			   snmp_fold_field(net->mib.net_statistics,
					   snmp4_net_list[i].entry));

	seq_puts(seq, "\nIpExt:");
@@ -468,7 +468,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
	seq_puts(seq, "\nIpExt:");
	for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
		seq_printf(seq, " %llu",
			   snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
			   snmp_fold_field64(net->mib.ip_statistics,
					     snmp4_ipextstats_list[i].entry,
					     offsetof(struct ipstats_mib, syncp)));

Loading