Commit 245b2e70 authored by Tejun Heo

percpu: clean up percpu variable definitions



Percpu variable definitions are about to be updated such that all percpu
symbols, including static ones, must be unique.  Update the percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely

* cpufreq: rename cpu_dbs_info uniquely

* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it

* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it

* ipv4,6: rename cookie_scratch uniquely

* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry

* perf_counter: rename disable_count to perf_disable_count

* ftrace: rename test_event_disable to ftrace_test_event_disable

* kmemleak: rename test_pointer to kmemleak_test_pointer

* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]
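
For context, the clash these renames avoid can be sketched as follows.
This is an illustrative sketch only, not part of the commit, and the
helper name is hypothetical: with globally unique percpu symbols, two
files can no longer each define a static percpu variable named, say,
ioc_count, so every definition gets a subsystem prefix.

	/* minimal sketch, not from this commit */
	#include <linux/percpu.h>

	/*
	 * Before: as-iosched.c and cfq-iosched.c each defined a static
	 * percpu "ioc_count"; with unique percpu symbols those two
	 * definitions would collide, hence the as_/cfq_ prefixes.
	 */
	static DEFINE_PER_CPU(unsigned long, as_ioc_count);

	static void example_inc(void)	/* hypothetical helper */
	{
		/* bump this CPU's instance, as the hunks below do */
		__get_cpu_var(as_ioc_count)++;
	}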

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
parent b9bf3121
arch/x86/kernel/cpu/mcheck/mce.c  +4 −4
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
  */
 static int check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed.  If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(next_interval);
+	n = &__get_cpu_var(mce_next_interval);
 	if (mce_notify_irq())
 		*n = max(*n/2, HZ/100);
 	else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(next_interval);
+	int *n = &__get_cpu_var(mce_next_interval);
 
 	if (mce_ignore_ce)
 		return;
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		t->expires = round_jiffies(jiffies +
-						__get_cpu_var(next_interval));
+					   __get_cpu_var(mce_next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
arch/x86/kernel/cpu/perf_counter.c  +7 −7
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 	x86_pmu_disable_counter(hwc, idx);
 }
 
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	per_cpu(prev_left[idx], smp_processor_id()) = left;
+	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
	 * The hw counter starts counting from this counter offset,
@@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void)
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
 
-		prev_left = per_cpu(prev_left[idx], cpu);
+		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
 		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
 			cpu, idx, pmc_ctrl);
@@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 		entry->ip[entry->nr++] = ip;
 }
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 
 
 static void
@@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	struct perf_callchain_entry *entry;
 
 	if (in_nmi())
-		entry = &__get_cpu_var(nmi_entry);
+		entry = &__get_cpu_var(pmc_nmi_entry);
 	else
-		entry = &__get_cpu_var(irq_entry);
+		entry = &__get_cpu_var(pmc_irq_entry);
 
 	entry->nr = 0;
 
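Aside: the perf_counter hunk above shows the per-CPU array form.
DEFINE_PER_CPU can define an array, and the per_cpu() accessor of this
era accepts an indexed element as its first argument. A minimal sketch,
with hypothetical names:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/types.h>

	#define NR_SLOTS 4	/* hypothetical array bound */
	static DEFINE_PER_CPU(u64 [NR_SLOTS], demo_slots);

	static void demo_record(int idx, u64 val)
	{
		/* element idx of the array owned by the current CPU */
		per_cpu(demo_slots[idx], smp_processor_id()) = val;
	}
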
block/as-iosched.c  +5 −5
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
 	if (ioc_gone) {
 		/*
 		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
 		 * complete ioc_gone and set it back to NULL.
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
 	}
 
 	return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
 		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
block/cfq-iosched.c  +5 −5
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1422,7 +1422,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 	cic = container_of(head, struct cfq_io_context, rcu_head);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
 	if (ioc_gone) {
 		/*
@@ -1431,7 +1431,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 		 * complete ioc_gone and set it back to NULL
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -1557,7 +1557,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -2658,7 +2658,7 @@ static void __exit cfq_exit(void)
 	 * this also protects us from entering cfq_slab_kill() with
 	 * pending RCU callbacks
 	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }
drivers/cpufreq/cpufreq_conservative.c  +6 −6
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
 	int cpu;
 	unsigned int enable:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,