Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3eb3963f authored by Ingo Molnar
Browse files

Merge branch 'cpus4096' into core/percpu



Conflicts:
	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
	arch/x86/kernel/tlb_32.c

Merge it here because both the cpumask changes and the ongoing percpu
work are touching the TLB code. The percpu changes take precedence, as
they eliminate tlb_32.c altogether.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents ae2b56b9 5766b842
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
	.child			= NULL,			\
	.groups			= NULL,			\
	.min_interval		= 8,			\
	.max_interval		= 8*(min(num_online_cpus(), 32)), \
	.max_interval		= 8*(min(num_online_cpus(), 32U)), \
	.busy_factor		= 64,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= 2,			\
+14 −3
Original line number Diff line number Diff line
@@ -190,9 +190,20 @@ extern int __node_distance(int, int);

#else /* !CONFIG_NUMA */

#define numa_node_id()		0
#define	cpu_to_node(cpu)	0
#define	early_cpu_to_node(cpu)	0
static inline int numa_node_id(void)
{
	return 0;
}

static inline int cpu_to_node(int cpu)
{
	return 0;
}

static inline int early_cpu_to_node(int cpu)
{
	return 0;
}

static inline const cpumask_t *cpumask_of_node(int node)
{
+5 −0
Original line number Diff line number Diff line
@@ -1844,6 +1844,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
	num_processors++;
	cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (version != apic_version[boot_cpu_physical_apicid])
		WARN_ONCE(1,
			"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
			apic_version[boot_cpu_physical_apicid], cpu, version);

	physid_set(apicid, phys_cpu_present_map);
	if (apicid == boot_cpu_physical_apicid) {
		/*
+13 −20
Original line number Diff line number Diff line
@@ -145,13 +145,14 @@ typedef union {

struct drv_cmd {
	unsigned int type;
	cpumask_var_t mask;
	const struct cpumask *mask;
	drv_addr_union addr;
	u32 val;
};

static void do_drv_read(struct drv_cmd *cmd)
static long do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
	default:
		break;
	}
	return 0;
}

static void do_drv_write(struct drv_cmd *cmd)
static long do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
@@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd)
	default:
		break;
	}
	return 0;
}

static void drv_read(struct drv_cmd *cmd)
{
	cpumask_t saved_mask = current->cpus_allowed;
	cmd->val = 0;

	set_cpus_allowed_ptr(current, cmd->mask);
	do_drv_read(cmd);
	set_cpus_allowed_ptr(current, &saved_mask);
	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
}

static void drv_write(struct drv_cmd *cmd)
{
	cpumask_t saved_mask = current->cpus_allowed;
	unsigned int i;

	for_each_cpu(i, cmd->mask) {
		set_cpus_allowed_ptr(current, cpumask_of(i));
		do_drv_write(cmd);
		work_on_cpu(i, do_drv_write, cmd);
	}

	set_cpus_allowed_ptr(current, &saved_mask);
	return;
}

static u32 get_cur_val(const struct cpumask *mask)
@@ -235,6 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask)
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	dprintk("get_cur_val = %u\n", cmd.val);
@@ -366,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
	return freq;
}

static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
@@ -401,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
		return -ENODEV;
	}

	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
		return -ENOMEM;

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
@@ -448,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
		cmd.mask = policy->cpus;
	else
		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
@@ -477,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
	perf->state = next_perf_state;

out:
	free_cpumask_var(cmd.mask);
	return result;
}

+10 −10
Original line number Diff line number Diff line
@@ -971,6 +971,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
}

#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;

struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w)
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return -EINVAL in the cpu is not online, or the return value
 * of @fn otherwise.
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
	INIT_WORK(&wfc.work, do_work_for_cpu);
	wfc.fn = fn;
	wfc.arg = arg;
	get_online_cpus();
	if (unlikely(!cpu_online(cpu)))
		wfc.ret = -EINVAL;
	else {
		schedule_work_on(cpu, &wfc.work);
	queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
	flush_work(&wfc.work);
	}
	put_online_cpus();

	return wfc.ret;
}
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void)
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
#ifdef CONFIG_SMP
	work_on_cpu_wq = create_workqueue("work_on_cpu");
	BUG_ON(!work_on_cpu_wq);
#endif
}