
Commit d99cf715 authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] xtensa: replace 'extern inline' with 'static inline'



"extern inline" doesn't make sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7ef93905
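The one-line rationale above is terse, so a brief expansion: under the GNU C89 rules in effect for kernel builds of this era, "extern inline" supplies a body for inlining only and never emits an out-of-line definition, so any call the compiler declines to inline (at -O0, for instance) becomes an unresolved symbol at link time. C99 later gave "extern inline" a different meaning again, which makes the construct doubly treacherous in headers. "static inline" sidesteps both problems by letting each translation unit emit its own local definition when one is needed. A minimal sketch of the failure mode, with hypothetical names:

/* pitfall.h -- hypothetical header illustrating the bug class */
#ifndef PITFALL_H
#define PITFALL_H

/*
 * gnu89 semantics: this body is only an inlining hint; no standalone
 * definition is ever emitted.  If a call is not inlined, the linker
 * must find my_add elsewhere -- in a header-only setup it will not,
 * and the build fails with "undefined reference to `my_add'".
 */
extern inline int my_add(int a, int b)
{
	return a + b;
}

/*
 * The replacement pattern applied by this patch: a file-local
 * out-of-line copy is emitted on demand, so callers link at any
 * optimization level.
 */
static inline int my_sub(int a, int b)
{
	return a - b;
}

#endif /* PITFALL_H */

Compiling a caller of my_add with gcc -std=gnu89 -O0 demonstrates the link failure; the my_sub variant links cleanly everywhere.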
+6 −6
@@ -66,7 +66,7 @@ typedef struct { volatile int counter; } atomic_t;
 *
 * Atomically adds @i to @v.
 */
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t * v)
{
    unsigned int vval;

@@ -90,7 +90,7 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
 *
 * Atomically subtracts @i from @v.
 */
-extern __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
    unsigned int vval;

@@ -111,7 +111,7 @@ extern __inline__ void atomic_sub(int i, atomic_t *v)
 * We use atomic_{add|sub}_return to define other functions.
 */

-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
{
     unsigned int vval;

@@ -130,7 +130,7 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
    return vval;
}

-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
{
    unsigned int vval;

@@ -224,7 +224,7 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)


-extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
    unsigned int all_f = -1;
    unsigned int vval;
@@ -243,7 +243,7 @@ extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
	);
}

-extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
    unsigned int vval;

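The comment in the atomic.h hunk above notes that atomic_{add|sub}_return are used to define the other operations. Only atomic_add_negative is actually visible in this diff; the companion macros sketched below follow the conventional kernel pattern and are illustrative rather than a quote of the file:

#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_add_negative(i,v)	(atomic_add_return((i), (v)) < 0)	/* shown in the diff */

Being macros rather than functions, these derived operations are untouched by the extern/static inline change.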
+2 −2
@@ -47,14 +47,14 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
 *	If you use these functions directly please don't forget the
 *	verify_area().
 */
-extern __inline__
+static inline
unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
					int len, int sum)
{
	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
}

-extern __inline__
+static inline
unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
						int len, int sum, int *err_ptr)
{
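The header comment in this hunk warns that direct callers must validate user pointers themselves. With the kernel API of that era this meant a verify_area() check before calling the _from_user variant; a hedged sketch, with error handling abbreviated:

	int err = 0;

	/* verify_area() returns 0 when the user range is accessible. */
	if (verify_area(VERIFY_READ, src, len))
		return -EFAULT;

	sum = csum_partial_copy_from_user(src, dst, len, sum, &err);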
+1 −1
@@ -18,7 +18,7 @@

extern unsigned long loops_per_jiffy;

-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
{
  /* 2 cycles per loop. */
  __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
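A note on the arithmetic in the loop above: the counter drops by 2 per iteration and the comment prices each iteration at 2 cycles, so __delay(n) busy-waits for roughly n CPU cycles. A usage sketch; the 50 MHz clock is an assumed figure, not from the source:

	__delay(50000);	/* ~50000 cycles at an assumed 50 MHz ≈ 1 ms busy-wait */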
+7 −7
@@ -41,12 +41,12 @@ static inline unsigned int _swapl (unsigned int v)
 * These are trivial on the 1:1 Linux/Xtensa mapping
 */

-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
{
	return PHYSADDR((unsigned long)address);
}

-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
{
	return (void*) CACHED_ADDR(address);
}
@@ -55,12 +55,12 @@ extern inline void * phys_to_virt(unsigned long address)
 * IO bus memory addresses are also 1:1 with the physical address
 */

-extern inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long virt_to_bus(volatile void * address)
{
	return PHYSADDR((unsigned long)address);
}

-extern inline void * bus_to_virt (unsigned long address)
+static inline void * bus_to_virt (unsigned long address)
{
	return (void *) CACHED_ADDR(address);
}
@@ -69,17 +69,17 @@ extern inline void * bus_to_virt (unsigned long address)
 * Change "struct page" to physical address.
 */

-extern inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void *ioremap(unsigned long offset, unsigned long size)
{
        return (void *) CACHED_ADDR_IO(offset);
}

-extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
        return (void *) BYPASS_ADDR_IO(offset);
}

-extern inline void iounmap(void *addr)
+static inline void iounmap(void *addr)
{
}
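For readers skimming the trio above: ioremap() returns a kernel pointer for a physical I/O range via the cached region, ioremap_nocache() does the same through the bypass (uncached) region, and iounmap() releases the mapping, which on this 1:1 layout is a no-op. A hedged usage sketch; the base address, size, and register layout are invented for illustration:

	void *regs = ioremap_nocache(0xfd000000, 0x100);
	unsigned int id = *(volatile unsigned int *) regs;	/* read a device ID register */
	iounmap(regs);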

+9 −9
@@ -199,13 +199,13 @@ extern pgd_t *current_pgd;
#define ASID_FIRST_VERSION						\
	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)

-extern inline void set_rasid_register (unsigned long val)
+static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

-extern inline unsigned long get_rasid_register (void)
+static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
@@ -215,7 +215,7 @@ extern inline unsigned long get_rasid_register (void)

#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))

-extern inline void
+static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
@@ -234,7 +234,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
   really the best, but if you insist... */

-extern inline int validate_asid (unsigned long asid)
+static inline int validate_asid (unsigned long asid)
{
	switch (asid) {
	case XCHAL_MMU_ASID_INVALID:
@@ -247,7 +247,7 @@ extern inline int validate_asid (unsigned long asid)
	return 1; /* valid */
}

-extern inline void
+static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
@@ -274,14 +274,14 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
 * instance.
 */

-extern inline int
+static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

-extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
	unsigned long asid = asid_cache;
@@ -301,7 +301,7 @@ extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
-extern inline void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}
@@ -310,7 +310,7 @@ extern inline void destroy_context(struct mm_struct *mm)
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
-extern inline void
+static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID.  */