Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fbc2edb0 authored by Christoph Lameter, committed by Linus Torvalds
Browse files

vmstat: use this_cpu() to avoid irqon/off sequence in refresh_cpu_vm_stats



Disabling interrupts repeatedly can be avoided in the inner loop if we use
a this_cpu operation.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
CC: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4edb0748
Loading
Loading
Loading
Loading
+16 −19
Original line number Diff line number Diff line
@@ -437,33 +437,29 @@ static inline void fold_diff(int *diff)
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
static void refresh_cpu_vm_stats(int cpu)
static void refresh_cpu_vm_stats(void)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {

				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
@@ -473,23 +469,24 @@ static void refresh_cpu_vm_stats(int cpu)
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
		if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			__this_cpu_write(p->expire, 0);
			continue;
		}

		p->expire--;
		if (p->expire)

		if (__this_cpu_dec_return(p->expire))
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
		if (__this_cpu_read(p->pcp.count))
			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
#endif
	}
	fold_diff(global_diff);
@@ -1216,7 +1213,7 @@ int sysctl_stat_interval __read_mostly = HZ;

/*
 * vmstat_update - periodic work handler that refreshes this CPU's
 * vm statistics into the global zone counters, then re-arms itself
 * on the per-cpu vmstat_work so the refresh repeats every
 * sysctl_stat_interval jiffies.
 *
 * NOTE(review): the diff view had interleaved the pre-patch call
 * refresh_cpu_vm_stats(smp_processor_id()) with its replacement;
 * only the post-patch no-argument form is kept here, matching the
 * new refresh_cpu_vm_stats(void) signature introduced by this
 * commit (the function now operates on the local CPU via this_cpu
 * operations, so no cpu argument is needed).
 */
static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats();
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}