Commit 6b3087c6 authored by Graf Yang, committed by Bryan Wu

Blackfin arch: SMP supporting patchset: Blackfin header files and machine common code

The Blackfin dual-core BF561 processor can support SMP-like features:
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

This patch adds the SMP extensions to the Blackfin header files
and machine common code.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
parent c51b4488
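
The pattern repeated throughout the diff below: under CONFIG_SMP, every atomic and bit operation is routed to an out-of-line assembly helper (the __raw_*_asm routines), while UP builds keep the old disable-interrupts-around-a-plain-read-modify-write fallback. A minimal userspace sketch of that split, with GCC's __sync_fetch_and_add standing in for the assembly helper (the sketch is illustrative, not taken from the patch):

/* Sketch: the CONFIG_SMP vs UP shape used by the patch below.
 * __sync_fetch_and_add stands in for __raw_atomic_update_asm(). */
#include <stdio.h>

#define CONFIG_SMP 1

typedef struct { volatile int counter; } atomic_t;

#ifdef CONFIG_SMP
/* SMP: delegate to a genuinely atomic primitive */
static inline void atomic_add(int i, atomic_t *v)
{
	__sync_fetch_and_add(&v->counter, i);
}
#else
/* UP: a plain read-modify-write is safe once interrupts are masked;
 * in the kernel, local_irq_save()/local_irq_restore() bracket this. */
static inline void atomic_add(int i, atomic_t *v)
{
	v->counter += i;
}
#endif

int main(void)
{
	atomic_t v = { 0 };

	atomic_add(5, &v);
	printf("%d\n", v.counter);	/* prints 5 */
	return 0;
}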
+95 −24
@@ -15,11 +15,80 @@
 */

#define ATOMIC_INIT(i)	{ (i) }
#define atomic_set(v, i)	(((v)->counter) = i)

#ifdef CONFIG_SMP

#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)

asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);

asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);

asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);

static inline void atomic_add(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, i);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, -i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, i);
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, -i);
}

static inline void atomic_inc(volatile atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, 1);
}

static inline void atomic_dec(volatile atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, -1);
}

static inline void atomic_clear_mask(int mask, atomic_t *v)
{
	__raw_atomic_clear_asm(&v->counter, mask);
}

static inline void atomic_set_mask(int mask, atomic_t *v)
{
	__raw_atomic_set_asm(&v->counter, mask);
}

static inline int atomic_test_mask(int mask, atomic_t *v)
{
	return __raw_atomic_test_asm(&v->counter, mask);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()    barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc()    barrier()
#define smp_mb__after_atomic_inc() barrier()

#else /* !CONFIG_SMP */

#define atomic_read(v)	((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)

static __inline__ void atomic_add(int i, atomic_t * v)
static inline void atomic_add(int i, atomic_t *v)
{
	long flags;

@@ -28,7 +97,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t * v)
static inline void atomic_sub(int i, atomic_t *v)
{
	long flags;

@@ -52,7 +121,6 @@ static inline int atomic_add_return(int i, atomic_t * v)
	return __temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
@@ -66,7 +134,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
	return __temp;
}

static __inline__ void atomic_inc(volatile atomic_t * v)
static inline void atomic_inc(volatile atomic_t *v)
{
	long flags;

@@ -75,20 +143,7 @@ static __inline__ void atomic_inc(volatile atomic_t * v)
	local_irq_restore(flags);
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

static __inline__ void atomic_dec(volatile atomic_t * v)
static inline void atomic_dec(volatile atomic_t *v)
{
	long flags;

@@ -97,7 +152,7 @@ static __inline__ void atomic_dec(volatile atomic_t * v)
	local_irq_restore(flags);
}

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	long flags;

@@ -106,7 +161,7 @@ static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	long flags;

@@ -121,9 +176,25 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
#define smp_mb__before_atomic_inc()    barrier()
#define smp_mb__after_atomic_inc() barrier()

#endif /* !CONFIG_SMP */

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
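
The atomic_add_unless() macro that this file now shares between the SMP and UP builds is a standard compare-and-swap retry loop. Written out as a function (userspace sketch; GCC's __sync_val_compare_and_swap stands in for the kernel's cmpxchg()):

/* Sketch of atomic_add_unless(v, a, u): add a to *p unless *p == u.
 * Returns nonzero if the add was performed. Illustrative only. */
static int atomic_add_unless_sketch(volatile int *p, int a, int u)
{
	int c = *p;
	int old;

	while (c != u) {
		/* try to swap c -> c + a; if another CPU changed *p
		 * meanwhile, retry with the value it installed */
		old = __sync_val_compare_and_swap(p, c, c + a);
		if (old == c)
			break;
		c = old;
	}
	return c != u;
}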
+3 −2
@@ -47,6 +47,9 @@
# define DMA_UNCACHED_REGION (0)
#endif

extern void bfin_setup_caches(unsigned int cpu);
extern void bfin_setup_cpudata(unsigned int cpu);

extern unsigned long get_cclk(void);
extern unsigned long get_sclk(void);
extern unsigned long sclk_to_usecs(unsigned long sclk);
@@ -58,8 +61,6 @@ extern void dump_bfin_trace_buffer(void);

/* init functions only */
extern int init_arch_irq(void);
extern void bfin_icache_init(void);
extern void bfin_dcache_init(void);
extern void init_exception_vectors(void);
extern void program_IAR(void);

+125 −64
@@ -7,7 +7,6 @@

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

@@ -20,36 +19,71 @@
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
#ifdef CONFIG_SMP

#include <linux/linkage.h>

asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);

asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_set_asm(a, nr & 0x1f);
}

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_clear_asm(a, nr & 0x1f);
}

static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	volatile unsigned long *a = addr + (nr >> 5);
	__raw_bit_toggle_asm(a, nr & 0x1f);
}

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	volatile const unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_set_asm(a, nr & 0x1f);
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_clear_asm(a, nr & 0x1f);
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);
	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
}

#else /* !CONFIG_SMP */

#include <asm/system.h>		/* save_flags */

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
@@ -57,21 +91,23 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	*a |= mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	unsigned long flags;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int mask, flags;
	unsigned long *ADDR = (unsigned long *)addr;
@@ -83,17 +119,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static __inline__ int test_and_set_bit(int nr, void *addr)
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -109,19 +135,23 @@ static __inline__ int test_and_set_bit(int nr, void *addr)
	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -131,13 +161,50 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
#endif /* CONFIG_SMP */

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -145,26 +212,23 @@ static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	*a |= mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);
	*a &= ~mask;
	return retval;
}

static __inline__ int __test_and_change_bit(int nr,
static inline int __test_and_change_bit(int nr,
					    volatile unsigned long *addr)
{
	int mask, retval;
@@ -177,16 +241,7 @@ static __inline__ int __test_and_change_bit(int nr,
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const void *addr)
static inline int __test_bit(int nr, const void *addr)
{
	int *a = (int *)addr;
	int mask;
@@ -196,10 +251,16 @@ static __inline__ int __test_bit(int nr, const void *addr)
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
#ifndef CONFIG_SMP
/*
 * This routine doesn't need irq save and restore ops in UP
 * context.
 */
static inline int test_bit(int nr, const void *addr)
{
	return __test_bit(nr, addr);
}
#endif

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
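
All of the bit helpers above use the same word/mask arithmetic: nr >> 5 picks the 32-bit word that holds bit nr, and 1 << (nr & 0x1f) builds the mask for the bit within that word. A standalone illustration (plain C, not from the patch):

/* Sketch: the indexing math behind set_bit() and friends. */
#include <stdio.h>

static void sketch_set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr + (nr >> 5);	/* word holding bit nr */
	unsigned long mask = 1UL << (nr & 0x1f);	/* bit within that word */

	*a |= mask;	/* a real set_bit() must make this step atomic */
}

int main(void)
{
	unsigned long bits[2] = { 0, 0 };

	sketch_set_bit(35, bits);	/* 35 = word 1, bit 3 */
	printf("%#lx %#lx\n", bits[0], bits[1]);	/* prints 0 0x8 */
	return 0;
}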
+29 −0
@@ -12,6 +12,11 @@
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES	L1_CACHE_BYTES

#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
#define ____cacheline_aligned

/*
 * Put cacheline_aligned data to L1 data memory
 */
@@ -21,9 +26,33 @@
		__section__(".data_l1.cacheline_aligned")))
#endif

#endif

/*
 * largest L1 which this arch supports
 */
#define L1_CACHE_SHIFT_MAX	5

#if defined(CONFIG_SMP) && \
    !defined(CONFIG_BFIN_CACHE_COHERENT) && \
    defined(CONFIG_BFIN_DCACHE)
#define __ARCH_SYNC_CORE_DCACHE
#ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void);

static inline void smp_mark_barrier(void)
{
	__raw_smp_mark_barrier_asm();
}
static inline void smp_check_barrier(void)
{
	__raw_smp_check_barrier_asm();
}

void resync_core_dcache(void);
#endif
#endif


#endif
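
The reason for the smp_mark_barrier()/smp_check_barrier() pair above: the BF561's two cores have no hardware dcache coherence, so after one core writes shared memory the other core can keep reading stale cached lines until it explicitly resynchronizes (see resync_core_dcache()). Below is a compilable stand-in for the calling convention only — the stub bodies and the barrier_pending flag are hypothetical; the real state lives in the assembly routines:

/* Conceptual sketch; not the patch's implementation. */
static int barrier_pending;	/* hypothetical stand-in for asm-side state */

static void __raw_smp_mark_barrier_asm(void)
{
	barrier_pending = 1;	/* writer notes that shared data changed */
}

static void __raw_smp_check_barrier_asm(void)
{
	if (barrier_pending) {
		/* the real routine invalidates the stale dcache lines */
		barrier_pending = 0;
	}
}

static inline void smp_mark_barrier(void)
{
	__raw_smp_mark_barrier_asm();
}

static inline void smp_check_barrier(void)
{
	__raw_smp_check_barrier_asm();
}

int main(void)
{
	smp_mark_barrier();	/* writer core, after updating shared data */
	smp_check_barrier();	/* reader core, before consuming it */
	return 0;
}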
+16 −4
@@ -35,6 +35,7 @@ extern void blackfin_icache_flush_range(unsigned long start_address, unsigned lo
extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page);
extern void blackfin_invalidate_entire_dcache(void);

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -44,12 +45,20 @@ extern void blackfin_dflush_page(void *page);
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_SMP
#define flush_icache_range_others(start, end)	\
	smp_icache_flush_range_others((start), (end))
#else
#define flush_icache_range_others(start, end)	do { } while (0)
#endif

static inline void flush_icache_range(unsigned start, unsigned end)
{
#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE)

# if defined(CONFIG_BFIN_WT)
	blackfin_icache_flush_range((start), (end));
	flush_icache_range_others(start, end);
# else
	blackfin_icache_dcache_flush_range((start), (end));
# endif
@@ -58,6 +67,7 @@ static inline void flush_icache_range(unsigned start, unsigned end)

# if defined(CONFIG_BFIN_ICACHE)
	blackfin_icache_flush_range((start), (end));
	flush_icache_range_others(start, end);
# endif
# if defined(CONFIG_BFIN_DCACHE)
	blackfin_dcache_flush_range((start), (end));
@@ -69,7 +79,9 @@ static inline void flush_icache_range(unsigned start, unsigned end)
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do { memcpy(dst, src, len);						\
     flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len));	\
     flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	memcpy(dst, src, len)

#if defined(CONFIG_BFIN_DCACHE)