
Commit 9fe02c03 authored by Linus Torvalds

Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm: (25 commits)
  [ARM] 5519/1: amba probe: pass "struct amba_id *" instead of void *
  [ARM] 5517/1: integrator: don't put clock lookups in __initdata
  [ARM] 5518/1: versatile: don't put clock lookups in __initdata
  [ARM] mach-l7200: fix spelling of SYS_CLOCK_OFF
  [ARM] Double check memmap is actually valid with a memmap has unexpected holes V2
  [ARM] realview: fix broadcast tick support
  [ARM] realview: remove useless smp_cross_call_done()
  [ARM] smp: fix cpumask usage in ARM SMP code
  [ARM] 5513/1: Eurotech VIPER SBC: fix compilation error
  [ARM] 5509/1: ep93xx: clkdev enable UARTS
  ARM: OMAP2/3: Change omapfb to use clkdev for dispc and rfbi, v2
  ARM: OMAP3: Fix HW SAVEANDRESTORE shift define
  ARM: OMAP3: Fix number of GPIO lines for 34xx
  [ARM] S3C: Do not set clk->owner field if unset
  [ARM] S3C2410: mach-bast.c registering i2c data too early
  [ARM] S3C24XX: Fix unused code warning in arch/arm/plat-s3c24xx/dma.c
  [ARM] S3C64XX: fix GPIO debug
  [ARM] S3C64XX: GPIO include cleanup
  [ARM] nwfpe: fix 'floatx80_is_nan' sparse warning
  [ARM] nwfpe: Add declaration for ExtendedCPDO
  ...
parents 6c2445ef 03fbdb15
+3 −3
@@ -273,6 +273,7 @@ config ARCH_EP93XX
 	select HAVE_CLK
 	select COMMON_CLKDEV
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_HOLES_MEMORYMODEL
 	help
 	  This enables support for the Cirrus EP93xx series of CPUs.
 
@@ -976,10 +977,9 @@ config OABI_COMPAT
 	  UNPREDICTABLE (in fact it can be predicted that it won't work
 	  at all). If in doubt say Y.
 
-config ARCH_FLATMEM_HAS_HOLES
+config ARCH_HAS_HOLES_MEMORYMODEL
 	bool
-	default y
-	depends on FLATMEM
+	default n
 
 # Discontigmem is deprecated
 config ARCH_DISCONTIGMEM_ENABLE
+2 −2
@@ -253,9 +253,9 @@ void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 }
 
 #ifdef CONFIG_SMP
-void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
-	unsigned long map = *cpus_addr(cpumask);
+	unsigned long map = *cpus_addr(*mask);
 
 	/* this always happens on GIC0 */
 	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
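
Passing the mask as const struct cpumask * instead of a cpumask_t by value matters because a cpumask is a bitmap sized by NR_CPUS: with a large-NR_CPUS configuration the old signature copied the whole bitmap onto the stack for every IPI. A small userspace sketch of the difference, with a hypothetical struct my_cpumask standing in for the kernel's struct cpumask (an analogy, not kernel code):

/*
 * Userspace sketch only: struct my_cpumask is a stand-in for the kernel's
 * struct cpumask, sized here for a hypothetical 4096-CPU configuration.
 */
#include <stdio.h>
#include <string.h>

#define MY_NR_CPUS 4096

struct my_cpumask {
	unsigned long bits[MY_NR_CPUS / (8 * sizeof(unsigned long))];
};

/* old style: the whole bitmap is copied onto the stack for every call */
static void raise_by_value(struct my_cpumask mask, unsigned int irq)
{
	printf("by value:   %zu bytes passed, irq %u\n", sizeof(mask), irq);
}

/* new style: one pointer crosses the call, and the callee cannot modify the mask */
static void raise_by_pointer(const struct my_cpumask *mask, unsigned int irq)
{
	printf("by pointer: %zu bytes passed, irq %u\n", sizeof(mask), irq);
}

int main(void)
{
	struct my_cpumask online;

	memset(&online, 0, sizeof(online));
	online.bits[0] = 0x3;			/* "CPUs" 0 and 1 set */

	raise_by_value(online, 1);		/* copies sizeof(struct my_cpumask) bytes */
	raise_by_pointer(&online, 1);		/* copies sizeof(void *) bytes */
	return 0;
}
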
+1 −1
@@ -36,7 +36,7 @@
 void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
 void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
 #endif
 
 #endif
+4 −8
@@ -53,17 +53,12 @@ extern void smp_store_cpu_info(unsigned int cpuid);
 /*
  * Raise an IPI cross call on CPUs in callmap.
  */
-extern void smp_cross_call(cpumask_t callmap);
-
-/*
- * Broadcast a timer interrupt to the other CPUs.
- */
-extern void smp_send_timer(void);
+extern void smp_cross_call(const struct cpumask *mask);
 
 /*
  * Broadcast a clock event to other CPUs.
  */
-extern void smp_timer_broadcast(cpumask_t mask);
+extern void smp_timer_broadcast(const struct cpumask *mask);
 
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
@@ -102,7 +97,8 @@ extern int platform_cpu_kill(unsigned int cpu);
 extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * Local timer interrupt handling function (can be IPI'ed).
+16 −30
@@ -326,14 +326,14 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
 {
 	unsigned long flags;
 	unsigned int cpu;
 
 	local_irq_save(flags);
 
-	for_each_cpu_mask(cpu, callmap) {
+	for_each_cpu(cpu, mask) {
 		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 
 		spin_lock(&ipi->lock);
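
for_each_cpu(cpu, mask) walks only the CPU numbers whose bits are set in the mask, just as the removed for_each_cpu_mask() did for the by-value mask; the loop body still touches each target's per-cpu ipi_data. A rough userspace sketch of that iteration pattern over a single-word bitmap (an analogy for illustration, not the kernel macro):

#include <stdio.h>

#define MY_NR_CPUS (8 * sizeof(unsigned long))

/* stands in for queueing into the per-cpu ipi_data and kicking the CPU */
static void queue_ipi(unsigned int cpu)
{
	printf("queue IPI for cpu %u\n", cpu);
}

int main(void)
{
	unsigned long mask = (1UL << 1) | (1UL << 3);	/* target "CPUs" 1 and 3 */
	unsigned int cpu;

	/* analogous to: for_each_cpu(cpu, mask) queue_ipi(cpu); */
	for (cpu = 0; cpu < MY_NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			queue_ipi(cpu);

	return 0;
}
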
@@ -344,19 +344,19 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
 	/*
 	 * Call the platform specific cross-CPU call function.
 	 */
-	smp_cross_call(callmap);
+	smp_cross_call(mask);
 
 	local_irq_restore(flags);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 void show_ipi_list(struct seq_file *p)
@@ -498,17 +498,10 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-void smp_send_timer(void)
-{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_TIMER);
-}
-
-void smp_timer_broadcast(cpumask_t mask)
+void smp_timer_broadcast(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_TIMER);
 }
@@ -517,7 +510,7 @@ void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_CPU_STOP);
+	send_ipi_message(&mask, IPI_CPU_STOP);
 }
 
 /*
@@ -528,20 +521,17 @@ int setup_profiling_timer(unsigned int multiplier)
 	return -EINVAL;
 }
 
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
+static void
+on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+		const struct cpumask *mask)
 {
-	int ret = 0;
-
 	preempt_disable();
 
-	ret = smp_call_function_mask(mask, func, info, wait);
-	if (cpu_isset(smp_processor_id(), mask))
+	smp_call_function_many(mask, func, info, wait);
+	if (cpumask_test_cpu(smp_processor_id(), mask))
 		func(info);
 
 	preempt_enable();
-
-	return ret;
 }
 
 /**********************************************************************/
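
The reworked helper keeps the original intent of on_each_cpu_mask(): invoke func on every CPU in the mask, using the generic cross-call for remote CPUs and a direct local call when the calling CPU is itself in the mask (smp_call_function_many(), like the smp_call_function_mask() it replaces, only targets other CPUs). A self-contained userspace sketch of that remote-plus-self split, with a hypothetical self parameter standing in for smp_processor_id() (an analogy, not kernel code):

#include <stdio.h>

#define MY_NR_CPUS 8

static void flush_one(void *info)
{
	printf("flush for %s\n", (char *)info);
}

/* sketch of the on_each_cpu_mask() shape: remote "CPUs" first, then self */
static void run_on_mask(void (*func)(void *), void *info,
			unsigned long mask, unsigned int self)
{
	unsigned int cpu;

	/* remote part: what the cross-call covers (never the calling CPU) */
	for (cpu = 0; cpu < MY_NR_CPUS; cpu++) {
		if (cpu != self && (mask & (1UL << cpu))) {
			printf("cpu %u: ", cpu);
			func(info);
		}
	}

	/* local part: run directly when the calling CPU is in the mask */
	if (mask & (1UL << self)) {
		printf("cpu %u (self): ", self);
		func(info);
	}
}

int main(void)
{
	unsigned long vm_mask = (1UL << 0) | (1UL << 2);	/* "CPUs" 0 and 2 */

	run_on_mask(flush_one, "some mm", vm_mask, 0);		/* caller is "CPU" 0 */
	return 0;
}
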
@@ -602,20 +592,17 @@ void flush_tlb_all(void)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t mask = mm->cpu_vm_mask;
-
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
 	struct tlb_args ta;
 
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -630,14 +617,13 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
 	struct tlb_args ta;
 
 	ta.ta_vma = vma;
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)