x86: include/asm-x86/string_32.h - style only

Looked at this file because of __memcpy warnings.
Thought it could use a style/checkpatch cleanup.

No change in vmlinux.

[tglx: fixed the remaining issues]

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 78d64fc21d (parent 2141261a11)
Author:    Joe Perches
Date:      2008-05-12 15:44:39 +02:00
Committer: Thomas Gleixner


@@ -32,8 +32,7 @@ extern size_t strlen(const char *s);
 static __always_inline void *__memcpy(void *to, const void *from, size_t n)
 {
 	int d0, d1, d2;
-	__asm__ __volatile__(
-		"rep ; movsl\n\t"
+	asm volatile("rep ; movsl\n\t"
 		"movl %4,%%ecx\n\t"
 		"andl $3,%%ecx\n\t"
 		"jz 1f\n\t"
@@ -42,67 +41,103 @@ __asm__ __volatile__(
: "=&c" (d0), "=&D" (d1), "=&S" (d2) : "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
: "memory"); : "memory");
return (to); return to;
} }
/* /*
* This looks ugly, but the compiler can optimize it totally, * This looks ugly, but the compiler can optimize it totally,
* as the count is constant. * as the count is constant.
*/ */
static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) static __always_inline void *__constant_memcpy(void *to, const void *from,
size_t n)
{ {
long esi, edi; long esi, edi;
if (!n) return to; if (!n)
#if 1 /* want to do small copies with non-string ops? */ return to;
switch (n) { switch (n) {
case 1: *(char*)to = *(char*)from; return to; case 1:
case 2: *(short*)to = *(short*)from; return to; *(char *)to = *(char *)from;
case 4: *(int*)to = *(int*)from; return to; return to;
#if 1 /* including those doable with two moves? */ case 2:
case 3: *(short*)to = *(short*)from; *(short *)to = *(short *)from;
*((char*)to+2) = *((char*)from+2); return to; return to;
case 5: *(int*)to = *(int*)from; case 4:
*((char*)to+4) = *((char*)from+4); return to; *(int *)to = *(int *)from;
case 6: *(int*)to = *(int*)from; return to;
*((short*)to+2) = *((short*)from+2); return to;
case 8: *(int*)to = *(int*)from; case 3:
*((int*)to+1) = *((int*)from+1); return to; *(short *)to = *(short *)from;
#endif *((char *)to + 2) = *((char *)from + 2);
return to;
case 5:
*(int *)to = *(int *)from;
*((char *)to + 4) = *((char *)from + 4);
return to;
case 6:
*(int *)to = *(int *)from;
*((short *)to + 2) = *((short *)from + 2);
return to;
case 8:
*(int *)to = *(int *)from;
*((int *)to + 1) = *((int *)from + 1);
return to;
} }
#endif
esi = (long)from; esi = (long)from;
edi = (long)to; edi = (long)to;
if (n >= 5 * 4) { if (n >= 5 * 4) {
/* large block: use rep prefix */ /* large block: use rep prefix */
int ecx; int ecx;
__asm__ __volatile__( asm volatile("rep ; movsl"
"rep ; movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi) : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
: "0" (n / 4), "1" (edi), "2" (esi) : "0" (n / 4), "1" (edi), "2" (esi)
: "memory" : "memory"
); );
} else { } else {
/* small block: don't clobber ecx + smaller code */ /* small block: don't clobber ecx + smaller code */
if (n >= 4*4) __asm__ __volatile__("movsl" if (n >= 4 * 4)
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); asm volatile("movsl"
if (n >= 3*4) __asm__ __volatile__("movsl" : "=&D"(edi), "=&S"(esi)
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); : "0"(edi), "1"(esi)
if (n >= 2*4) __asm__ __volatile__("movsl" : "memory");
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 3 * 4)
if (n >= 1*4) __asm__ __volatile__("movsl" asm volatile("movsl"
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); : "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 2 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 1 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
} }
switch (n % 4) { switch (n % 4) {
/* tail */ /* tail */
case 0: return to; case 0:
case 1: __asm__ __volatile__("movsb"
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
return to; return to;
case 2: __asm__ __volatile__("movsw" case 1:
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); asm volatile("movsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to; return to;
default: __asm__ __volatile__("movsw\n\tmovsb" case 2:
:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); asm volatile("movsw"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
default:
asm volatile("movsw\n\tmovsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to; return to;
} }
} }
@@ -124,7 +159,7 @@ static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
 	return _mmx_memcpy(to, from, len);
 }
 
-static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
+static inline void *__memcpy3d(void *to, const void *from, size_t len)
 {
 	if (len < 512)
 		return __memcpy(to, from, len);
@@ -132,9 +167,9 @@ static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
 }
 
 #define memcpy(t, f, n)				\
-	(__builtin_constant_p(n) ?		\
-	 __constant_memcpy3d((t),(f),(n)) :	\
-	 __memcpy3d((t),(f),(n)))
+	(__builtin_constant_p((n))		\
+	 ? __constant_memcpy3d((t), (f), (n))	\
+	 : __memcpy3d((t), (f), (n)))
 
 #else
@@ -143,9 +178,9 @@ static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
  */
 #define memcpy(t, f, n)				\
-	(__builtin_constant_p(n) ?		\
-	 __constant_memcpy((t),(f),(n)) :	\
-	 __memcpy((t),(f),(n)))
+	(__builtin_constant_p((n))		\
+	 ? __constant_memcpy((t), (f), (n))	\
+	 : __memcpy((t), (f), (n)))
 
 #endif
@@ -160,8 +195,7 @@ extern void *memchr(const void * cs,int c,size_t count);
 static inline void *__memset_generic(void *s, char c, size_t count)
 {
 	int d0, d1;
-	__asm__ __volatile__(
-		"rep\n\t"
+	asm volatile("rep\n\t"
 		"stosb"
 		: "=&c" (d0), "=&D" (d1)
 		: "a" (c), "1" (s), "0" (count)
@@ -177,11 +211,11 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline
+void *__constant_c_memset(void *s, unsigned long c, size_t count)
 {
 	int d0, d1;
-	__asm__ __volatile__(
-		"rep ; stosl\n\t"
+	asm volatile("rep ; stosl\n\t"
 		"testb $2,%b3\n\t"
 		"je 1f\n\t"
 		"stosw\n"
@@ -192,7 +226,7 @@ __asm__ __volatile__(
: "=&c" (d0), "=&D" (d1) : "=&c" (d0), "=&D" (d1)
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s) : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
: "memory"); : "memory");
return (s); return s;
} }
/* Added by Gertjan van Wingerde to make minix and sysv module work */ /* Added by Gertjan van Wingerde to make minix and sysv module work */
@@ -207,7 +241,9 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline
+void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+				    size_t count)
 {
 	switch (count) {
 	case 0:
@@ -220,26 +256,35 @@ static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 		return s;
 	case 3:
 		*(unsigned short *)s = pattern & 0xffff;
-		*(2+(unsigned char *)s) = pattern & 0xff;
+		*((unsigned char *)s + 2) = pattern & 0xff;
 		return s;
 	case 4:
 		*(unsigned long *)s = pattern;
 		return s;
 	}
+
 #define COMMON(x)						\
-__asm__ __volatile__(						\
-	"rep ; stosl"						\
+	asm volatile("rep ; stosl"				\
 		x						\
 		: "=&c" (d0), "=&D" (d1)			\
 		: "a" (pattern), "0" (count/4), "1" ((long)s)	\
 		: "memory")
+
 {
 	int d0, d1;
 	switch (count % 4) {
-	case 0: COMMON(""); return s;
-	case 1: COMMON("\n\tstosb"); return s;
-	case 2: COMMON("\n\tstosw"); return s;
-	default: COMMON("\n\tstosw\n\tstosb"); return s;
+	case 0:
+		COMMON("");
+		return s;
+	case 1:
+		COMMON("\n\tstosb");
+		return s;
+	case 2:
+		COMMON("\n\tstosw");
+		return s;
+	default:
+		COMMON("\n\tstosw\n\tstosb");
+		return s;
 	}
 }
@@ -247,20 +292,21 @@ __asm__ __volatile__( \
 }
 
 #define __constant_c_x_memset(s, c, count)			\
-	(__builtin_constant_p(count) ?				\
-	 __constant_c_and_count_memset((s),(c),(count)) :	\
-	 __constant_c_memset((s),(c),(count)))
+	(__builtin_constant_p(count)				\
+	 ? __constant_c_and_count_memset((s), (c), (count))	\
+	 : __constant_c_memset((s), (c), (count)))
 
 #define __memset(s, c, count)				\
-	(__builtin_constant_p(count) ?			\
-	 __constant_count_memset((s),(c),(count)) :	\
-	 __memset_generic((s),(c),(count)))
+	(__builtin_constant_p(count)			\
+	 ? __constant_count_memset((s), (c), (count))	\
+	 : __memset_generic((s), (c), (count)))
 
 #define __HAVE_ARCH_MEMSET
 #define memset(s, c, count)						\
-	(__builtin_constant_p(c) ?					\
-	 __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
-	 __memset((s),(c),(count)))
+	(__builtin_constant_p(c)					\
+	 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
+				 (count))				\
+	 : __memset((s), (c), (count)))
 
 /*
  * find the first occurrence of byte 'c', or 1 past the area if none
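
A note on the dispatch pattern the memcpy/memset macros above rely on: __builtin_constant_p(n) is a GCC builtin that evaluates to 1 when n is a compile-time constant, so the macro statically routes constant-size calls to __constant_memcpy(), whose switch the optimizer collapses to one or two plain moves, while runtime sizes take the rep;movs path. Likewise, 0x01010101UL * (unsigned char)(c) in the memset macro replicates one byte across all four bytes of a word so stosl can store it 32 bits at a time. Below is a minimal, self-contained sketch of the same technique; the names copy_const, copy_var, copy and spread_byte are illustrative, not from the kernel header, and the generic path just calls libc memcpy instead of inline asm.

#include <stddef.h>
#include <string.h>

/* Spread a byte across a 32-bit word: 0xab -> 0xabababab,
 * mirroring 0x01010101UL * (unsigned char)(c) above. */
static inline unsigned long spread_byte(unsigned char c)
{
	return 0x01010101UL * c;
}

/* Constant-size path: when n is known at compile time, the
 * compiler folds this switch into one or two plain moves. */
static inline void *copy_const(void *to, const void *from, size_t n)
{
	switch (n) {
	case 1:
		*(char *)to = *(const char *)from;
		return to;
	case 4:
		*(int *)to = *(const int *)from;
		return to;
	}
	return memcpy(to, from, n);	/* other sizes: generic copy */
}

/* Runtime-size path, standing in for the rep;movs version. */
static inline void *copy_var(void *to, const void *from, size_t n)
{
	return memcpy(to, from, n);
}

/* Same shape as the header's memcpy macro: __builtin_constant_p(n)
 * is nonzero only for compile-time constants, so the conditional is
 * resolved at compile time and the unused branch is discarded. */
#define copy(t, f, n)				\
	(__builtin_constant_p(n)		\
	 ? copy_const((t), (f), (n))		\
	 : copy_var((t), (f), (n)))

With optimization enabled, copy(dst, src, 4) compiles down to a single 32-bit load and store, while copy(dst, src, len) with a runtime len calls the generic routine; this is why a style-only cleanup of these macros can leave vmlinux byte-for-byte unchanged.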