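Use the reserved __asm__ / __volatile__ spellings instead of the plain
asm / volatile keywords in the arm, i386, and s390 byteorder headers.
The bare keywords are GNU extensions that gcc disables in strict ISO
modes (e.g. -ansi / -std=c99), so code that includes these headers with
such flags presumably fails to parse the inline assembly; the
double-underscore spellings are accepted in every mode.

A minimal, hypothetical sketch of the effect (not part of the patch):
the same statement written with the plain `asm' keyword is a syntax
error under -ansi, while the form below compiles.

	/* i386 example: builds with `gcc -ansi -c swab32.c' */
	static __inline__ unsigned int swab32(unsigned int x)
	{
		__asm__("bswapl %0" : "=r" (x) : "0" (x));
		return x;
	}
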
diff --git a/include/asm-arm/byteorder.h b/include/asm-arm/byteorder.h
index e6f7fcd..39105dc 100644
--- a/include/asm-arm/byteorder.h
+++ b/include/asm-arm/byteorder.h
@@ -29,7 +29,7 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
 		 * right thing and not screw it up to different degrees
 		 * depending on the gcc version.
 		 */
-		asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x));
+		__asm__ ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x));
 	} else
 #endif
 		t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */
diff --git a/include/asm-i386/byteorder.h b/include/asm-i386/byteorder.h
index a45470a..4ead40b 100644
--- a/include/asm-i386/byteorder.h
+++ b/include/asm-i386/byteorder.h
@@ -32,13 +32,13 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
 	} v;
 	v.u = val;
 #ifdef CONFIG_X86_BSWAP
-	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
-	    : "=r" (v.s.a), "=r" (v.s.b)
-	    : "0" (v.s.a), "1" (v.s.b));
+	__asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+	    : "=r" (v.s.a), "=r" (v.s.b)
+	    : "0" (v.s.a), "1" (v.s.b));
 #else
 	v.s.a = ___arch__swab32(v.s.a);
 	v.s.b = ___arch__swab32(v.s.b);
-	asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+	__asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
 #endif
 	return v.u;
 }
diff --git a/include/asm-s390/byteorder.h b/include/asm-s390/byteorder.h
index 1fe2492..07230f6 100644
--- a/include/asm-s390/byteorder.h
+++ b/include/asm-s390/byteorder.h
@@ -18,7 +18,7 @@ static inline __u64 ___arch__swab64p(const __u64 *x)
 {
 	__u64 result;
 
-	asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
+	__asm__ __volatile__("lrvg %0,%1" : "=d" (result) : "m" (*x));
 	return result;
 }
 
@@ -26,7 +26,7 @@ static inline __u64 ___arch__swab64(__u64 x)
 {
 	__u64 result;
 
-	asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
+	__asm__ __volatile__("lrvgr %0,%1" : "=d" (result) : "d" (x));
 	return result;
 }
 
@@ -40,7 +40,7 @@ static inline __u32 ___arch__swab32p(const __u32 *x)
 {
 	__u32 result;
 
-	asm volatile(
+	__asm__ __volatile__(
 #ifndef __s390x__
 		" icm %0,8,3(%1)\n"
 		" icm %0,4,2(%1)\n"
@@ -61,7 +61,7 @@ static inline __u32 ___arch__swab32(__u32 x)
 #else /* __s390x__ */
 	__u32 result;
 
-	asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
+	__asm__ __volatile__("lrvr %0,%1" : "=d" (result) : "d" (x));
 	return result;
 #endif /* __s390x__ */
 }
@@ -75,7 +75,7 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
 {
 	__u16 result;
 
-	asm volatile(
+	__asm__ __volatile__(
 #ifndef __s390x__
 		" icm %0,2,1(%1)\n"
 		" ic %0,0(%1)\n"