Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5d2068da authored by Rusty Russell
Browse files

ia64: fix up obsolete cpu function usage.



Thanks to spatch, then a sweep for for_each_cpu_mask => for_each_cpu.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: linux-ia64@vger.kernel.org
parent f9b531fe
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -117,7 +117,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
	for_each_cpu_mask((cpu), early_cpu_possible_map)
	for_each_cpu((cpu), &early_cpu_possible_map)

static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
@@ -125,13 +125,13 @@ static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
	int cpu;
	int next_nid = 0;

	low_cpu = cpus_weight(early_cpu_possible_map);
	low_cpu = cpumask_weight(&early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpu_set(cpu, early_cpu_possible_map);
		cpumask_set_cpu(cpu, &early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
+1 −1
Original line number Diff line number Diff line
@@ -483,7 +483,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
	    (pa->apic_id << 8) | (pa->local_sapic_eid);
	/* nid should be overridden as logical node id later */
	node_cpuid[srat_num_cpus].nid = pxm;
	cpu_set(srat_num_cpus, early_cpu_possible_map);
	cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
	srat_num_cpus++;
}

+1 −1
Original line number Diff line number Diff line
@@ -690,7 +690,7 @@ get_target_cpu (unsigned int gsi, int irq)
	do {
		if (++cpu >= nr_cpu_ids)
			cpu = 0;
	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
	} while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain));

	return cpu_physical_id(cpu);
#else  /* CONFIG_SMP */
+14 −14
Original line number Diff line number Diff line
@@ -109,13 +109,13 @@ static inline int find_unassigned_vector(cpumask_t domain)
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
@@ -132,18 +132,18 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

@@ -242,7 +242,7 @@ void __setup_vector_irq(int cpu)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -273,7 +273,7 @@ static int __irq_prepare_move(int irq, int cpu)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
@@ -307,12 +307,12 @@ void irq_complete_move(unsigned irq)
	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
@@ -338,12 +338,12 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpu_clear(me, vector_table[vector]);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
+5 −5
Original line number Diff line number Diff line
@@ -1293,7 +1293,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		cpumask_set_cpu(cpu, &mca_cpu);
		sos->monarch = 0;
	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
@@ -1316,7 +1316,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpu_isset(cpu, mca_cpu))
		while (cpumask_test_cpu(cpu, &mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

@@ -1355,9 +1355,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
			if (cpumask_test_cpu(i, &mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				cpumask_clear_cpu(i, &mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
@@ -1822,7 +1822,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
	ti->cpu = cpu;
	p->stack = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpu_set(cpu, p->cpus_allowed);
	cpumask_set_cpu(cpu, &p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
Loading