[PATCH] m68knommu: change use of extern inline to static inline in headers
"extern inline" doesn't make much sense here. Signed-off-by: Adrian Bunk <bunk@stusta.de> Signed-off-by: Greg Ungerer <gerg@uclinux.com> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit c514b8be7a
parent f98e85691b
committed by Linus Torvalds
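For context: under the gnu89 semantics that gcc applied to kernel code at the time, "extern inline" provides an inline-only definition and never emits an out-of-line copy of the function, so any call site the compiler declines to inline (e.g. at -O0 or with -fno-inline) is left as an unresolved external reference. "static inline" instead makes gcc emit an internal-linkage copy in each translation unit that still needs one, which is why it is the right choice for header functions. A minimal sketch, with hypothetical functions not taken from these headers:

/* gnu89: inline-only definition.  If gcc does not inline a call,
 * it references an external symbol "bad_inc" that no object file
 * defines, and the link fails.
 */
extern inline int bad_inc(int x)
{
	return x + 1;
}

/* gnu89: gcc emits a file-local copy of "good_inc" in any unit
 * where a call is not inlined, so the link always succeeds.
 */
static inline int good_inc(int x)
{
	return x + 1;
}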
@@ -100,7 +100,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
 #define smp_mb__before_atomic_inc() barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long temp, flags;
 
@@ -115,7 +115,7 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
 
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 
-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long temp, flags;
 

@@ -8,7 +8,7 @@
 
 #include <asm/param.h>
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 #if defined(CONFIG_COLDFIRE)
 	/* The coldfire runs this loop at significantly different speeds
@@ -48,7 +48,7 @@ extern __inline__ void __delay(unsigned long loops)
 
 extern unsigned long loops_per_jiffy;
 
-extern __inline__ void _udelay(unsigned long usecs)
+static inline void _udelay(unsigned long usecs)
 {
 #if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
 	defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \

@@ -147,19 +147,19 @@ static inline void io_insl(unsigned int addr, void *buf, int len)
 extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
 extern void __iounmap(void *addr, unsigned long size);
 
-extern inline void *ioremap(unsigned long physaddr, unsigned long size)
+static inline void *ioremap(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-extern inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-extern inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
-extern inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }

@@ -90,7 +90,7 @@
  * that the debug module instructions (2 longs) must be long word aligned and
  * some pointer fiddling is performed to ensure this.
  */
-extern inline void wdebug(int reg, unsigned long data) {
+static inline void wdebug(int reg, unsigned long data) {
 	unsigned short dbg_spc[6];
 	unsigned short *dbg;
 

@@ -10,7 +10,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	// mm->context = virt_to_phys(mm->pgd);
@@ -25,7 +25,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 
 #define deactivate_mm(tsk,mm) do { } while (0)
 
-extern inline void activate_mm(struct mm_struct *prev_mm,
+static inline void activate_mm(struct mm_struct *prev_mm,
 			       struct mm_struct *next_mm)
 {
 }

@@ -21,7 +21,7 @@
 #include <asm/ptrace.h>
 #include <asm/current.h>
 
-extern inline unsigned long rdusp(void)
+static inline unsigned long rdusp(void)
 {
 #ifdef CONFIG_COLDFIRE
 	extern unsigned int sw_usp;
@@ -33,7 +33,7 @@ extern inline unsigned long rdusp(void)
 #endif
 }
 
-extern inline void wrusp(unsigned long usp)
+static inline void wrusp(unsigned long usp)
 {
 #ifdef CONFIG_COLDFIRE
 	extern unsigned int sw_usp;

@@ -41,7 +41,7 @@ struct semaphore {
 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
 
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
 {
 	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
 }
@@ -73,7 +73,7 @@ extern spinlock_t semaphore_wake_lock;
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
 {
 	might_sleep();
 	__asm__ __volatile__(
@@ -88,7 +88,7 @@ extern inline void down(struct semaphore * sem)
 		: "cc", "%a0", "%a1", "memory");
 }
 
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
 {
 	int ret;
 
@@ -107,7 +107,7 @@ extern inline int down_interruptible(struct semaphore * sem)
 	return(ret);
 }
 
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
 {
 	register struct semaphore *sem1 __asm__ ("%a1") = sem;
 	register int result __asm__ ("%d0");
@@ -135,7 +135,7 @@ extern inline int down_trylock(struct semaphore * sem)
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
 	__asm__ __volatile__(
 		"| atomic up operation\n\t"

@@ -47,12 +47,12 @@ static inline void flush_tlb_range(struct mm_struct *mm,
 	BUG();
 }
 
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
 {
 	BUG();
 }
 
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 	BUG();