arch/arm/Kconfig (+3, -12)

@@ -11,6 +11,7 @@ config ARM
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_TRACEHOOK
 	select HAVE_KPROBES if !XIP_KERNEL
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
@@ -30,6 +31,8 @@ config ARM
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
+	select HARDIRQS_SW_RESEND
+	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select GENERIC_PCI_IOMAP
@@ -126,14 +129,6 @@ config TRACE_IRQFLAGS_SUPPORT
 	bool
 	default y
 
-config HARDIRQS_SW_RESEND
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
 config GENERIC_LOCKBREAK
 	bool
 	default y
@@ -633,7 +628,6 @@ config ARCH_MMP
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
-	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
 	select GENERIC_ALLOCATOR
@@ -717,7 +711,6 @@ config ARCH_PXA
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
-	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
 	select AUTO_ZRELADDR
@@ -784,7 +777,6 @@ config ARCH_SA1100
 	select CPU_FREQ
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
-	select TICK_ONESHOT
 	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_IDE
 	select NEED_MACH_MEMORY_H
@@ -1562,7 +1554,6 @@ config ARM_ARCH_TIMER
 
 config HAVE_ARM_TWD
 	bool
 	depends on SMP
-	select TICK_ONESHOT
 	help
 	  This options enables support for the ARM timer and watchdog unit
arch/arm/boot/compressed/head.S (+18, -11)

@@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr
 
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		bic	r3, r3, #0xff		@ Align the pointer
 		bic	r3, r3, #0x3f00
@@ -578,17 +584,14 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		mov	r9, r0, lsr #18
 		mov	r9, r9, lsl #18		@ start of RAM
 		add	r10, r9, #0x10000000	@ a reasonable RAM size
-		mov	r1, #0x12
-		orr	r1, r1, #3 << 10
+		mov	r1, #0x12		@ XN|U + section mapping
+		orr	r1, r1, #3 << 10	@ AP=11
 		add	r2, r3, #16384
 1:		cmp	r1, r9			@ if virt > start of RAM
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-		orrhs	r1, r1, #0x08		@ set cacheable
-#else
-		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
-#endif
-		cmp	r1, r10			@ if virt > end of RAM
-		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
+		cmphs	r10, r1			@   && end of RAM > virt
+		bic	r1, r1, #0x1c		@ clear XN|U + C + B
+		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
+		orrhs	r1, r1, r6		@ set RAM section settings
 		str	r1, [r0], #4		@ 1:1 mapping
 		add	r1, r1, #1048576
 		teq	r0, r2
@@ -599,7 +602,7 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
-		mov	r1, #0x1e
+		orr	r1, r6, #0x04		@ ensure B is set for this
 		orr	r1, r1, #3 << 10
 		mov	r2, pc
 		mov	r2, r2, lsr #20
@@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
 __armv4_mmu_cache_on:
 		mov	r12, lr
 #ifdef CONFIG_MMU
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_MMU
 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
 		tst	r11, #0xf		@ VMSA
+		movne	r6, #CB_BITS | 0x02	@ !XN
 		blne	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
 		orrne	r0, r0, #1		@ MMU enabled
-		movne	r1, #-1
+		movne	r1, #0xfffffffd		@ domain 0 = client
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 #endif
@@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
 
 __fa526_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
@@ -682,6 +688,7 @@ __fa526_cache_on:
 
 __arm6_mmu_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
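The rewritten __setup_mmu loop above takes its cache/permission bits from r6 (CB_BITS plus the U or !XN bit loaded by each caller) instead of hard-coding them per configuration. As an orientation aid, here is a minimal C sketch of the section-descriptor value the loop computes for each 1 MB of the identity map. This is not kernel code: section_desc() is a hypothetical helper, and the constants are taken from the comments in the diff (0x12 = section + XN/U, 3 << 10 = AP, 0x1c = XN/U + C + B).

#include <stdint.h>
#include <stdio.h>

/* Rough model of one section entry as written by the __setup_mmu loop. */
static uint32_t section_desc(uint32_t addr, uint32_t ram_start,
			     uint32_t ram_end, uint32_t cb)
{
	uint32_t desc = (addr & 0xfff00000u) | 0x12u | (3u << 10);	/* base | section | AP=11 */

	desc &= ~0x1cu;				/* clear XN/U + C + B			*/
	if (addr >= ram_start && addr < ram_end)
		desc |= cb;			/* RAM: attribute bits from r6		*/
	else
		desc |= 0x10u;			/* non-RAM: XN/U, uncached, unbuffered	*/
	return desc;
}

int main(void)
{
	uint32_t cb = 0x0c | 0x12;	/* CB_BITS | 0x12, as loaded in __armv4_mmu_cache_on */

	printf("RAM section:     %#010x\n",
	       (unsigned)section_desc(0x80000000u, 0x80000000u, 0x90000000u, cb));
	printf("non-RAM section: %#010x\n",
	       (unsigned)section_desc(0x10000000u, 0x80000000u, 0x90000000u, cb));
	return 0;
}

The point of the change is visible in the two printed values: only sections inside the RAM window get the cacheable/bufferable bits, everything else ends up uncached with XN/U set, so the CPU cannot speculatively fetch from non-RAM regions.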
arch/arm/common/vic.c (+31, -25)

@@ -39,6 +39,7 @@
  * struct vic_device - VIC PM device
  * @irq: The IRQ number for the base of the VIC.
  * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
  * @resume_sources: A bitmask of interrupts for resume.
  * @resume_irqs: The IRQs enabled for resume.
  * @int_select: Save for VIC_INT_SELECT.
@@ -50,6 +51,7 @@ struct vic_device {
 	void __iomem	*base;
 	int		irq;
+	u32		valid_sources;
 	u32		resume_sources;
 	u32		resume_irqs;
 	u32		int_select;
@@ -164,10 +166,32 @@ static int __init vic_pm_init(void)
 late_initcall(vic_pm_init);
 #endif /* CONFIG_PM */
 
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+			     irq_hw_number_t hwirq)
+{
+	struct vic_device *v = d->host_data;
+
+	/* Skip invalid IRQs, only register handlers for the real ones */
+	if (!(v->valid_sources & (1 << hwirq)))
+		return -ENOTSUPP;
+	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+	irq_set_chip_data(irq, v->base);
+	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	return 0;
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+	.map = vic_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
 /**
  * vic_register() - Register a VIC.
  * @base: The base address of the VIC.
  * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
  * @resume_sources: bitmask of interrupts allowed for resume sources.
  * @node: The device tree node associated with the VIC.
  *
@@ -178,7 +202,8 @@ late_initcall(vic_pm_init);
  * This also configures the IRQ domain for the VIC.
  */
 static void __init vic_register(void __iomem *base, unsigned int irq,
-				u32 resume_sources, struct device_node *node)
+				u32 valid_sources, u32 resume_sources,
+				struct device_node *node)
 {
 	struct vic_device *v;
 
@@ -189,11 +214,12 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
 
 	v = &vic_devices[vic_id];
 	v->base = base;
+	v->valid_sources = valid_sources;
 	v->resume_sources = resume_sources;
 	v->irq = irq;
 	vic_id++;
-	v->domain = irq_domain_add_legacy(node, 32, irq, 0,
-					  &irq_domain_simple_ops, v);
+	v->domain = irq_domain_add_legacy(node, fls(valid_sources), irq, 0,
+					  &vic_irqdomain_ops, v);
 }
 
 static void vic_ack_irq(struct irq_data *d)
@@ -287,23 +313,6 @@ static void __init vic_clear_interrupts(void __iomem *base)
 	}
 }
 
-static void __init vic_set_irq_sources(void __iomem *base,
-				       unsigned int irq_start, u32 vic_sources)
-{
-	unsigned int i;
-
-	for (i = 0; i < 32; i++) {
-		if (vic_sources & (1 << i)) {
-			unsigned int irq = irq_start + i;
-
-			irq_set_chip_and_handler(irq, &vic_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-	}
-}
-
 /*
  * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
  * The original cell has 32 interrupts, while the modified one has 64,
@@ -338,8 +347,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
 		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
 	}
 
-	vic_set_irq_sources(base, irq_start, vic_sources);
-	vic_register(base, irq_start, 0, node);
+	vic_register(base, irq_start, vic_sources, 0, node);
 }
 
 void __init __vic_init(void __iomem *base, unsigned int irq_start,
@@ -379,9 +387,7 @@ void __init __vic_init(void __iomem *base, unsigned int irq_start,
 
 	vic_init2(base);
 
-	vic_set_irq_sources(base, irq_start, vic_sources);
-
-	vic_register(base, irq_start, resume_sources, node);
+	vic_register(base, irq_start, vic_sources, resume_sources, node);
 }
 
 /**
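With this conversion the VIC no longer pre-registers handlers for all 32 IRQs up front: the legacy irq domain is sized with fls(valid_sources), and the new .map callback rejects any hwirq whose bit is not set in valid_sources. The standalone C sketch below illustrates that sizing and filtering logic only; it is not kernel code, and fls32()/vic_map_ok() are hypothetical stand-ins for the kernel's fls() and the check inside vic_irqdomain_map().

#include <stdint.h>
#include <stdio.h>

/* find last set bit, 1-based; returns 0 for an empty mask (like kernel fls()) */
static int fls32(uint32_t x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

/* mirrors the valid_sources check in vic_irqdomain_map() */
static int vic_map_ok(uint32_t valid_sources, unsigned int hwirq)
{
	return (valid_sources & (1u << hwirq)) != 0;
}

int main(void)
{
	uint32_t valid = 0x0000ffff;	/* e.g. a VIC with only 16 sources wired up */

	printf("domain size:    %d\n", fls32(valid));		/* 16, not 32 */
	printf("hwirq 3 valid:  %d\n", vic_map_ok(valid, 3));	/* 1 -> handler registered */
	printf("hwirq 20 valid: %d\n", vic_map_ok(valid, 20));	/* 0 -> -ENOTSUPP in .map  */
	return 0;
}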
arch/arm/include/asm/cacheflush.h (+3, -3)

@@ -101,7 +101,7 @@ struct cpu_cache_fns {
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
-	void (*coherent_user_range)(unsigned long, unsigned long);
+	int (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
 	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
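flush_cache_user_range() drops its unused vma argument, and because __cpuc_coherent_user_range() now returns an int, the macro expands to an expression whose value reports whether the user-range maintenance succeeded. The following self-contained sketch shows the new macro shape; PAGE_SIZE/PAGE_MASK/PAGE_ALIGN and the flush helper are local stand-ins for the demo, not the kernel definitions.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* stand-in for the real cache helper; the kernel version can report a fault */
static int __cpuc_coherent_user_range(unsigned long start, unsigned long end)
{
	printf("flush [%#lx, %#lx)\n", start, end);
	return 0;
}

/* same shape as the new kernel macro: no vma, value forwarded to the caller */
#define flush_cache_user_range(start, end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

int main(void)
{
	/* addresses are rounded out to whole pages before flushing */
	int ret = flush_cache_user_range(0x12345UL, 0x12400UL);

	return ret ? 1 : 0;
}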
arch/arm/include/asm/cmpxchg.h (+13, -60)

@@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
-		: "r" (ptr), "Ir" (__old), "r" (__new)
-		: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
+						atomic64_t,		\
+						counter),		\
+					      (unsigned long)(o),	\
+					      (unsigned long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
+					     local64_t,			\
+					     a),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
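cmpxchg64() and cmpxchg64_local() become thin wrappers around atomic64_cmpxchg()/local64_cmpxchg(), using container_of() to treat the supplied pointer as the counter member of an atomic64_t (or the a member of a local64_t). The standalone demo below shows only the container_of() step that makes this work; it is not kernel code, the demo_* names are hypothetical, container_of is re-derived locally, and the compare-and-swap here is deliberately not atomic.

#include <stddef.h>
#include <stdio.h>

/* local re-derivation of the kernel's container_of() for the demo */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* shaped like atomic64_t: a struct whose only member is the 64-bit counter */
struct demo_atomic64 {
	long long counter;
};

static long long demo_atomic64_cmpxchg(struct demo_atomic64 *v,
					long long old, long long new)
{
	long long prev = v->counter;

	if (prev == old)
		v->counter = new;	/* single-threaded demo: no real atomicity */
	return prev;
}

int main(void)
{
	struct demo_atomic64 v = { .counter = 42 };
	long long *p = &v.counter;	/* the kind of pointer cmpxchg64() is handed */

	/* recover the containing struct from the member pointer, then swap */
	long long prev = demo_atomic64_cmpxchg(
		container_of(p, struct demo_atomic64, counter), 42, 99);

	printf("prev=%lld now=%lld\n", prev, v.counter);	/* prev=42 now=99 */
	return 0;
}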