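Convert the plain `asm'/`volatile' keywords in the ia64 gcc intrinsics header
to their double-underscore spellings (`__asm__'/`__volatile__'), presumably so
the header stays usable from strictly conforming translation units: the plain
spellings are GNU extensions that gcc stops recognizing under strict ISO modes
(-ansi and the -std= options imply -fno-asm), while the __asm__ forms are
accepted in every language mode.

A minimal sketch of the difference (illustration only, not part of the patched
header; the file and function names below are made up):

	/* demo.c */
	static inline void barrier_gnu(void)
	{
		/* GNU spelling: not a keyword under gcc -std=c99 (-fno-asm),
		 * so this fails to parse */
		asm volatile ("" ::: "memory");
	}

	static inline void barrier_iso(void)
	{
		/* reserved spelling: accepted in all gcc language modes */
		__asm__ __volatile__ ("" ::: "memory");
	}
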
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index 4fb4e43..0bb24df 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -13,41 +13,41 @@

 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
-#define ia64_barrier() asm volatile ("":::"memory")
+#define ia64_barrier() __asm__ __volatile__ ("":::"memory")

-#define ia64_stop() asm volatile (";;"::)
+#define ia64_stop() __asm__ __volatile__ (";;"::)

-#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
+#define ia64_invala_gr(regnum) __asm__ __volatile__ ("invala.e r%0" :: "i"(regnum))

-#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+#define ia64_invala_fr(regnum) __asm__ __volatile__ ("invala.e f%0" :: "i"(regnum))

 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);

-register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+register unsigned long ia64_r13 __asm__ ("r13") __attribute_used__;

 #define ia64_setreg(regnum, val) \
 ({ \
 switch (regnum) { \
 case _IA64_REG_PSR_L: \
- asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
+ __asm__ __volatile__ ("mov psr.l=%0" :: "r"(val) : "memory"); \
 break; \
 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
- asm volatile ("mov ar%0=%1" :: \
+ __asm__ __volatile__ ("mov ar%0=%1" :: \
 "i" (regnum - _IA64_REG_AR_KR0), \
 "r"(val): "memory"); \
 break; \
 case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
- asm volatile ("mov cr%0=%1" :: \
+ __asm__ __volatile__ ("mov cr%0=%1" :: \
 "i" (regnum - _IA64_REG_CR_DCR), \
 "r"(val): "memory" ); \
 break; \
 case _IA64_REG_SP: \
- asm volatile ("mov r12=%0" :: \
+ __asm__ __volatile__ ("mov r12=%0" :: \
 "r"(val): "memory"); \
 break; \
 case _IA64_REG_GP: \
- asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
+ __asm__ __volatile__ ("mov gp=%0" :: "r"(val) : "memory"); \
 break; \
 default: \
 ia64_bad_param_for_setreg(); \
@@ -61,27 +61,27 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 \
 switch (regnum) { \
 case _IA64_REG_GP: \
- asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
+ __asm__ __volatile__ ("mov %0=gp" : "=r"(ia64_intri_res)); \
 break; \
 case _IA64_REG_IP: \
- asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
+ __asm__ __volatile__ ("mov %0=ip" : "=r"(ia64_intri_res)); \
 break; \
 case _IA64_REG_PSR: \
- asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
+ __asm__ __volatile__ ("mov %0=psr" : "=r"(ia64_intri_res)); \
 break; \
 case _IA64_REG_TP: /* for current() */ \
 ia64_intri_res = ia64_r13; \
 break; \
 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
- asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
+ __asm__ __volatile__ ("mov %0=ar%1" : "=r" (ia64_intri_res) \
 : "i"(regnum - _IA64_REG_AR_KR0)); \
 break; \
 case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
- asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
+ __asm__ __volatile__ ("mov %0=cr%1" : "=r" (ia64_intri_res) \
 : "i" (regnum - _IA64_REG_CR_DCR)); \
 break; \
 case _IA64_REG_SP: \
- asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
+ __asm__ __volatile__ ("mov %0=sp" : "=r" (ia64_intri_res)); \
 break; \
 default: \
 ia64_bad_param_for_getreg(); \
@@ -96,7 +96,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 ({ \
 switch (mode) { \
 case ia64_hint_pause: \
- asm volatile ("hint @pause" ::: "memory"); \
+ __asm__ __volatile__ ("hint @pause" ::: "memory"); \
 break; \
 } \
 })
@@ -115,19 +115,19 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 \
 switch (mode) { \
 case ia64_mux1_brcst: \
- asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
+ __asm__ ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
 break; \
 case ia64_mux1_mix: \
- asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
+ __asm__ ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
 break; \
 case ia64_mux1_shuf: \
- asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
+ __asm__ ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
 break; \
 case ia64_mux1_alt: \
- asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
+ __asm__ ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
 break; \
 case ia64_mux1_rev: \
- asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
+ __asm__ ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
 break; \
 } \
 ia64_intri_res; \
@@ -139,7 +139,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 # define ia64_popcnt(x) \
 ({ \
 __u64 ia64_intri_res; \
- asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
+ __asm__ ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
 \
 ia64_intri_res; \
 })
@@ -149,7 +149,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 ({ \
 long ia64_intri_res; \
 \
- asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
+ __asm__ ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
 \
 ia64_intri_res; \
 })
@@ -157,75 +157,75 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_shrp(a, b, count) \
 ({ \
 __u64 ia64_intri_res; \
- asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
+ __asm__ ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
 ia64_intri_res; \
 })

 #define ia64_ldfs(regnum, x) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
 })

 #define ia64_ldfd(regnum, x) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
 })

 #define ia64_ldfe(regnum, x) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
 })

 #define ia64_ldf8(regnum, x) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
 })

 #define ia64_ldf_fill(regnum, x) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
 })

 #define ia64_stfs(x, regnum) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
 })

 #define ia64_stfd(x, regnum) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
 })

 #define ia64_stfe(x, regnum) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
 })

 #define ia64_stf8(x, regnum) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
 })

 #define ia64_stf_spill(x, regnum) \
 ({ \
- register double __f__ asm ("f"#regnum); \
- asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+ register double __f__ __asm__ ("f"#regnum); \
+ __asm__ __volatile__ ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
 })

 #define ia64_fetchadd4_acq(p, inc) \
 ({ \
 \
 __u64 ia64_intri_res; \
- asm volatile ("fetchadd4.acq %0=[%1],%2" \
+ __asm__ __volatile__ ("fetchadd4.acq %0=[%1],%2" \
 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
 : "memory"); \
 \
@@ -235,7 +235,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_fetchadd4_rel(p, inc) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("fetchadd4.rel %0=[%1],%2" \
+ __asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \
 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
 : "memory"); \
 \
@@ -246,7 +246,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 ({ \
 \
 __u64 ia64_intri_res; \
- asm volatile ("fetchadd8.acq %0=[%1],%2" \
+ __asm__ __volatile__ ("fetchadd8.acq %0=[%1],%2" \
 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
 : "memory"); \
 \
@@ -256,7 +256,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_fetchadd8_rel(p, inc) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("fetchadd8.rel %0=[%1],%2" \
+ __asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \
 : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
 : "memory"); \
 \
@@ -266,7 +266,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_xchg1(ptr,x) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("xchg1 %0=[%1],%2" \
+ __asm__ __volatile__ ("xchg1 %0=[%1],%2" \
 : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
 ia64_intri_res; \
 })
@@ -274,7 +274,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_xchg2(ptr,x) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ __asm__ __volatile__ ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
 : "r" (ptr), "r" (x) : "memory"); \
 ia64_intri_res; \
 })
@@ -282,7 +282,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_xchg4(ptr,x) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ __asm__ __volatile__ ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
 : "r" (ptr), "r" (x) : "memory"); \
 ia64_intri_res; \
 })
@@ -290,7 +290,7 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_xchg8(ptr,x) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ __asm__ __volatile__ ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
 : "r" (ptr), "r" (x) : "memory"); \
 ia64_intri_res; \
 })
@@ -298,8 +298,8 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg1_acq(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -307,8 +307,8 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg1_rel(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -316,8 +316,8 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg2_acq(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -325,9 +325,9 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg2_rel(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
 \
- asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -335,8 +335,8 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg4_acq(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -344,8 +344,8 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg4_rel(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -353,8 +353,8 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg8_acq(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
- asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })
@@ -362,106 +362,106 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_cmpxchg8_rel(ptr, new, old) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(old)); \
 \
- asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
+ __asm__ __volatile__ ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
 ia64_intri_res; \
 })

-#define ia64_mf() asm volatile ("mf" ::: "memory")
-#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
+#define ia64_mf() __asm__ __volatile__ ("mf" ::: "memory")
+#define ia64_mfa() __asm__ __volatile__ ("mf.a" ::: "memory")

-#define ia64_invala() asm volatile ("invala" ::: "memory")
+#define ia64_invala() __asm__ __volatile__ ("invala" ::: "memory")

 #define ia64_thash(addr) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ __asm__ __volatile__ ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
 ia64_intri_res; \
 })

-#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
-#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
+#define ia64_srlz_i() __asm__ __volatile__ (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d() __asm__ __volatile__ (";; srlz.d" ::: "memory");

 #ifdef HAVE_SERIALIZE_DIRECTIVE
-# define ia64_dv_serialize_data() asm volatile (".serialize.data");
-# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
+# define ia64_dv_serialize_data() __asm__ __volatile__ (".serialize.data");
+# define ia64_dv_serialize_instruction() __asm__ __volatile__ (".serialize.instruction");
 #else
 # define ia64_dv_serialize_data()
 # define ia64_dv_serialize_instruction()
 #endif

-#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
+#define ia64_nop(x) __asm__ __volatile__ ("nop %0"::"i"(x));

-#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+#define ia64_itci(addr) __asm__ __volatile__ ("itc.i %0;;" :: "r"(addr) : "memory")

-#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+#define ia64_itcd(addr) __asm__ __volatile__ ("itc.d %0;;" :: "r"(addr) : "memory")


-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
+#define ia64_itri(trnum, addr) __asm__ __volatile__ ("itr.i itr[%0]=%1" \
 :: "r"(trnum), "r"(addr) : "memory")

-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
+#define ia64_itrd(trnum, addr) __asm__ __volatile__ ("itr.d dtr[%0]=%1" \
 :: "r"(trnum), "r"(addr) : "memory")

 #define ia64_tpa(addr) \
 ({ \
 __u64 ia64_pa; \
- asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
+ __asm__ __volatile__ ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
 ia64_pa; \
 })

 #define __ia64_set_dbr(index, val) \
- asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ __asm__ __volatile__ ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

 #define ia64_set_ibr(index, val) \
- asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ __asm__ __volatile__ ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

 #define ia64_set_pkr(index, val) \
- asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ __asm__ __volatile__ ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

 #define ia64_set_pmc(index, val) \
- asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ __asm__ __volatile__ ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

 #define ia64_set_pmd(index, val) \
- asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ __asm__ __volatile__ ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

 #define ia64_set_rr(index, val) \
- asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+ __asm__ __volatile__ ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");

 #define ia64_get_cpuid(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
+ __asm__ __volatile__ ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
 ia64_intri_res; \
 })

 #define __ia64_get_dbr(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ __asm__ __volatile__ ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 ia64_intri_res; \
 })

 #define ia64_get_ibr(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ __asm__ __volatile__ ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 ia64_intri_res; \
 })

 #define ia64_get_pkr(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ __asm__ __volatile__ ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 ia64_intri_res; \
 })

 #define ia64_get_pmc(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ __asm__ __volatile__ ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 ia64_intri_res; \
 })

@@ -469,46 +469,46 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
 #define ia64_get_pmd(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ __asm__ __volatile__ ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 ia64_intri_res; \
 })

 #define ia64_get_rr(index) \
 ({ \
 __u64 ia64_intri_res; \
- asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
+ __asm__ __volatile__ ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
 ia64_intri_res; \
 })

-#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define ia64_fc(addr) __asm__ __volatile__ ("fc %0" :: "r"(addr) : "memory")


-#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+#define ia64_sync_i() __asm__ __volatile__ (";; sync.i" ::: "memory")

-#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
-#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
-#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
-#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+#define ia64_ssm(mask) __asm__ __volatile__ ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_rsm(mask) __asm__ __volatile__ ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask) __asm__ __volatile__ ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask) __asm__ __volatile__ ("rum %0":: "i"((mask)) : "memory")

-#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
+#define ia64_ptce(addr) __asm__ __volatile__ ("ptc.e %0" :: "r"(addr))

 #define ia64_ptcga(addr, size) \
 do { \
- asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ __asm__ __volatile__ ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
 ia64_dv_serialize_data(); \
 } while (0)

 #define ia64_ptcl(addr, size) \
 do { \
- asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
 ia64_dv_serialize_data(); \
 } while (0)

 #define ia64_ptri(addr, size) \
- asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+ __asm__ __volatile__ ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

 #define ia64_ptrd(addr, size) \
- asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+ __asm__ __volatile__ ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

 /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

@@ -521,16 +521,16 @@ do { \
 ({ \
 switch (lfhint) { \
 case ia64_lfhint_none: \
- asm volatile ("lfetch [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch [%0]" : : "r"(y)); \
 break; \
 case ia64_lfhint_nt1: \
- asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.nt1 [%0]" : : "r"(y)); \
 break; \
 case ia64_lfhint_nt2: \
- asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.nt2 [%0]" : : "r"(y)); \
 break; \
 case ia64_lfhint_nta: \
- asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.nta [%0]" : : "r"(y)); \
 break; \
 } \
 })
@@ -539,16 +539,16 @@ do { \
 ({ \
 switch (lfhint) { \
 case ia64_lfhint_none: \
- asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.excl [%0]" :: "r"(y)); \
 break; \
 case ia64_lfhint_nt1: \
- asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
 break; \
 case ia64_lfhint_nt2: \
- asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
 break; \
 case ia64_lfhint_nta: \
- asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.excl.nta [%0]" :: "r"(y)); \
 break; \
 } \
 })
@@ -557,16 +557,16 @@ do { \
 ({ \
 switch (lfhint) { \
 case ia64_lfhint_none: \
- asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault [%0]" : : "r"(y)); \
 break; \
 case ia64_lfhint_nt1: \
- asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
 break; \
 case ia64_lfhint_nt2: \
- asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
 break; \
 case ia64_lfhint_nta: \
- asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.nta [%0]" : : "r"(y)); \
 break; \
 } \
 })
@@ -575,23 +575,23 @@ do { \
 ({ \
 switch (lfhint) { \
 case ia64_lfhint_none: \
- asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.excl [%0]" :: "r"(y)); \
 break; \
 case ia64_lfhint_nt1: \
- asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
 break; \
 case ia64_lfhint_nt2: \
- asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
 break; \
 case ia64_lfhint_nta: \
- asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
+ __asm__ __volatile__ ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
 break; \
 } \
 })

 #define ia64_intrin_local_irq_restore(x) \
 do { \
- asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
+ __asm__ __volatile__ (";; cmp.ne p6,p7=%0,r0;;" \
 "(p6) ssm psr.i;" \
 "(p7) rsm psr.i;;" \
 "(p6) srlz.d" \