Diffstat (limited to 'libc/sysdeps')
-rw-r--r--   libc/sysdeps/linux/i386/bits/mathinline.h    8
-rw-r--r--   libc/sysdeps/linux/ia64/bits/syscalls.h      2
-rw-r--r--   libc/sysdeps/linux/mips/sysdep.h             16
-rw-r--r--   libc/sysdeps/linux/powerpc/bits/atomic.h     2
-rw-r--r--   libc/sysdeps/linux/sh/bits/atomic.h          6
-rw-r--r--   libc/sysdeps/linux/sh/bits/syscalls.h        2
-rw-r--r--   libc/sysdeps/linux/sparc/bits/atomic.h       20
-rw-r--r--   libc/sysdeps/linux/sparc/bits/syscalls.h     2
8 files changed, 29 insertions, 29 deletions
diff --git a/libc/sysdeps/linux/i386/bits/mathinline.h b/libc/sysdeps/linux/i386/bits/mathinline.h
index 5caf73353..53cbcb2b3 100644
--- a/libc/sysdeps/linux/i386/bits/mathinline.h
+++ b/libc/sysdeps/linux/i386/bits/mathinline.h
@@ -529,8 +529,8 @@ __inline_mathcodeNP (tanh, __x, \
 __inline_mathcodeNP (floor, __x, \
   register long double __value; \
-  __volatile unsigned short int __cw; \
-  __volatile unsigned short int __cwtmp; \
+  __volatile__ unsigned short int __cw; \
+  __volatile__ unsigned short int __cwtmp; \
   __asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \
   __cwtmp = (__cw & 0xf3ff) | 0x0400; /* rounding down */ \
   __asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \
@@ -540,8 +540,8 @@ __inline_mathcodeNP (floor, __x, \
 __inline_mathcodeNP (ceil, __x, \
   register long double __value; \
-  __volatile unsigned short int __cw; \
-  __volatile unsigned short int __cwtmp; \
+  __volatile__ unsigned short int __cw; \
+  __volatile__ unsigned short int __cwtmp; \
   __asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \
   __cwtmp = (__cw & 0xf3ff) | 0x0800; /* rounding up */ \
   __asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \
diff --git a/libc/sysdeps/linux/ia64/bits/syscalls.h b/libc/sysdeps/linux/ia64/bits/syscalls.h
index 9fda3d07a..9c6a2b77b 100644
--- a/libc/sysdeps/linux/ia64/bits/syscalls.h
+++ b/libc/sysdeps/linux/ia64/bits/syscalls.h
@@ -54,7 +54,7 @@
     register long _r15 __asm__ ("r15") = name; \
     long _retval; \
     LOAD_REGS_##nr \
-    __asm __volatile (BREAK_INSN (__IA64_BREAK_SYSCALL) \
+    __asm__ __volatile__ (BREAK_INSN (__IA64_BREAK_SYSCALL) \
                       : "=r" (_r8), "=r" (_r10), "=r" (_r15) \
                         ASM_OUTARGS_##nr \
                       : "2" (_r15) ASM_ARGS_##nr \
diff --git a/libc/sysdeps/linux/mips/sysdep.h b/libc/sysdeps/linux/mips/sysdep.h
index 4cefd6198..0860c1a48 100644
--- a/libc/sysdeps/linux/mips/sysdep.h
+++ b/libc/sysdeps/linux/mips/sysdep.h
@@ -169,7 +169,7 @@ L(syse1):
 { \
   register long __v0 __asm__("$2") ncs_init; \
   register long __a3 __asm__("$7"); \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   cs_init \
   "syscall\n\t" \
@@ -192,7 +192,7 @@ L(syse1):
   register long __v0 __asm__("$2") ncs_init; \
   register long __a0 __asm__("$4") = (long) arg1; \
   register long __a3 __asm__("$7"); \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   cs_init \
   "syscall\n\t" \
@@ -216,7 +216,7 @@ L(syse1):
   register long __a0 __asm__("$4") = (long) arg1; \
   register long __a1 __asm__("$5") = (long) arg2; \
   register long __a3 __asm__("$7"); \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   cs_init \
   "syscall\n\t" \
@@ -241,7 +241,7 @@ L(syse1):
   register long __a1 __asm__("$5") = (long) arg2; \
   register long __a2 __asm__("$6") = (long) arg3; \
   register long __a3 __asm__("$7"); \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   cs_init \
   "syscall\n\t" \
@@ -266,7 +266,7 @@ L(syse1):
   register long __a1 __asm__("$5") = (long) arg2; \
   register long __a2 __asm__("$6") = (long) arg3; \
   register long __a3 __asm__("$7") = (long) arg4; \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   cs_init \
   "syscall\n\t" \
@@ -298,7 +298,7 @@ L(syse1):
   register long __a1 __asm__("$5") = (long) arg2; \
   register long __a2 __asm__("$6") = (long) arg3; \
   register long __a3 __asm__("$7") = (long) arg4; \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   "subu\t$29, 32\n\t" \
   "sw\t%6, 16($29)\n\t" \
@@ -328,7 +328,7 @@ L(syse1):
   register long __a1 __asm__("$5") = (long) arg2; \
   register long __a2 __asm__("$6") = (long) arg3; \
   register long __a3 __asm__("$7") = (long) arg4; \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   "subu\t$29, 32\n\t" \
   "sw\t%6, 16($29)\n\t" \
@@ -359,7 +359,7 @@ L(syse1):
   register long __a1 __asm__("$5") = (long) arg2; \
   register long __a2 __asm__("$6") = (long) arg3; \
   register long __a3 __asm__("$7") = (long) arg4; \
-  __asm__ volatile ( \
+  __asm__ __volatile__ ( \
   ".set\tnoreorder\n\t" \
   "subu\t$29, 32\n\t" \
   "sw\t%6, 16($29)\n\t" \
diff --git a/libc/sysdeps/linux/powerpc/bits/atomic.h b/libc/sysdeps/linux/powerpc/bits/atomic.h
index 3dbbb3a09..dba113670 100644
--- a/libc/sysdeps/linux/powerpc/bits/atomic.h
+++ b/libc/sysdeps/linux/powerpc/bits/atomic.h
@@ -341,7 +341,7 @@
  * So if the build is using -mcpu=[power4,power5,power5+,970] we can
  * safely use lwsync.
  */
-# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
 /*
  * "light weight" sync can also be used for the release barrier.
  */
diff --git a/libc/sysdeps/linux/sh/bits/atomic.h b/libc/sysdeps/linux/sh/bits/atomic.h
index a099b43a8..bc79b29b7 100644
--- a/libc/sysdeps/linux/sh/bits/atomic.h
+++ b/libc/sysdeps/linux/sh/bits/atomic.h
@@ -80,7 +80,7 @@ typedef uintmax_t uatomic_max_t;
 #define __arch_compare_and_exchange_n(mem, newval, oldval, bwl, version) \
   ({ signed long __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
     .align 2\n\
     mova 1f,r0\n\
     nop\n\
@@ -139,7 +139,7 @@ typedef uintmax_t uatomic_max_t;
  */
 #define __arch_operate_old_new_n(mem, value, old, new, bwl, oper) \
-  (void) ({ __asm __volatile ("\
+  (void) ({ __asm__ __volatile__ ("\
     .align 2\n\
     mova 1f,r0\n\
     mov r15,r1\n\
@@ -185,7 +185,7 @@ typedef uintmax_t uatomic_max_t;
 #define __arch_operate_new_n(mem, value, bwl, oper) \
   ({ int32_t __value = (value), __new; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
     .align 2\n\
     mova 1f,r0\n\
     mov r15,r1\n\
diff --git a/libc/sysdeps/linux/sh/bits/syscalls.h b/libc/sysdeps/linux/sh/bits/syscalls.h
index c69dce537..59d2d1ec7 100644
--- a/libc/sysdeps/linux/sh/bits/syscalls.h
+++ b/libc/sysdeps/linux/sh/bits/syscalls.h
@@ -119,7 +119,7 @@
     register long int r3 __asm__ ("%r3") = (name); \
     SUBSTITUTE_ARGS_##nr(args); \
     \
-    __asm__ volatile (SYSCALL_INST_STR##nr SYSCALL_INST_PAD \
+    __asm__ __volatile__ (SYSCALL_INST_STR##nr SYSCALL_INST_PAD \
                       : "=z" (resultvar) \
                       : "r" (r3) ASMFMT_##nr \
                       : "memory"); \
diff --git a/libc/sysdeps/linux/sparc/bits/atomic.h b/libc/sysdeps/linux/sparc/bits/atomic.h
index f625eb92a..16c3c78c9 100644
--- a/libc/sysdeps/linux/sparc/bits/atomic.h
+++ b/libc/sysdeps/linux/sparc/bits/atomic.h
@@ -69,7 +69,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
     unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \
                          & 63; \
     do \
-      __asm __volatile ("ldstub %1, %0" \
+      __asm__ __volatile__ ("ldstub %1, %0" \
                         : "=r" (__old_lock), \
                           "=m" (__sparc32_atomic_locks[__idx]) \
                         : "m" (__sparc32_atomic_locks[__idx]) \
@@ -83,7 +83,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
   { \
     __sparc32_atomic_locks[(((long) addr >> 2) \
                             ^ ((long) addr >> 12)) & 63] = 0; \
-    __asm __volatile ("" ::: "memory"); \
+    __asm__ __volatile__ ("" ::: "memory"); \
   } \
  while (0)
@@ -92,7 +92,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
   { \
     unsigned int __old_lock; \
     do \
-      __asm __volatile ("ldstub %1, %0" \
+      __asm__ __volatile__ ("ldstub %1, %0" \
                         : "=r" (__old_lock), "=m" (*(addr)) \
                         : "m" (*(addr)) \
                         : "memory"); \
@@ -104,7 +104,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
  do \
   { \
     *(char *) (addr) = 0; \
-    __asm __volatile ("" ::: "memory"); \
+    __asm__ __volatile__ ("" ::: "memory"); \
   } \
  while (0)
@@ -112,14 +112,14 @@ volatile unsigned char __sparc32_atomic_locks[64]
 #ifndef SHARED
 # define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \
 ({ \
-  register __typeof (*(mem)) __acev_tmp __asm ("%g6"); \
-  register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \
-  register __typeof (*(mem)) __acev_oldval __asm ("%g5"); \
+  register __typeof (*(mem)) __acev_tmp __asm__ ("%g6"); \
+  register __typeof (mem) __acev_mem __asm__ ("%g1") = (mem); \
+  register __typeof (*(mem)) __acev_oldval __asm__ ("%g5"); \
   __acev_tmp = (newval); \
   __acev_oldval = (oldval); \
   /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \
      because as will then mark the object file as V8+ arch. */ \
-  __asm __volatile (".word 0xcde05005" \
+  __asm__ __volatile__ (".word 0xcde05005" \
                     : "+r" (__acev_tmp), "=m" (*__acev_mem) \
                     : "r" (__acev_oldval), "m" (*__acev_mem), \
                       "r" (__acev_mem) : "memory"); \
@@ -187,7 +187,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
       *__acev_memp = __acev_newval; \
     else \
       __sparc32_atomic_do_unlock24 (__acev_memp); \
-    __asm __volatile ("" ::: "memory"); \
+    __asm__ __volatile__ ("" ::: "memory"); \
     __acev_ret; })
 #define __v7_exchange_24_rel(mem, newval) \
@@ -198,7 +198,7 @@ volatile unsigned char __sparc32_atomic_locks[64]
     __sparc32_atomic_do_lock24 (__acev_memp); \
     __acev_ret = *__acev_memp & 0xffffff; \
     *__acev_memp = __acev_newval; \
-    __asm __volatile ("" ::: "memory"); \
+    __asm__ __volatile__ ("" ::: "memory"); \
     __acev_ret; })
 #ifdef SHARED
diff --git a/libc/sysdeps/linux/sparc/bits/syscalls.h b/libc/sysdeps/linux/sparc/bits/syscalls.h
index 78f143911..dc8d63713 100644
--- a/libc/sysdeps/linux/sparc/bits/syscalls.h
+++ b/libc/sysdeps/linux/sparc/bits/syscalls.h
@@ -78,7 +78,7 @@
     register long __o3 __asm__ ("o3") = (long)(arg4); \
     register long __o4 __asm__ ("o4") = (long)(arg5); \
     register long __g1 __asm__ ("g1") = __NR_clone; \
-    __asm __volatile (__CLONE_SYSCALL_STRING : \
+    __asm__ __volatile__ (__CLONE_SYSCALL_STRING : \
                       "=r" (__g1), "=r" (__o0), "=r" (__o1) : \
                       "0" (__g1), "1" (__o0), "2" (__o1), \
                       "r" (__o2), "r" (__o3), "r" (__o4) : \
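Every hunk above makes the same substitution: the bare __asm / __volatile (or __asm__ volatile) spellings are replaced by the fully underscored __asm__ __volatile__ keywords. As a minimal, stand-alone sketch of the resulting idiom -- illustrative only, not part of the commit, loosely modelled on the x87 control-word handling in the i386 mathinline.h hunk:

/* Sketch only: demonstrates the __asm__ __volatile__ spelling the patch
 * standardizes on, using the same fnstcw/fldcw round-down sequence as the
 * i386 mathinline.h hunk.  Builds with GCC on i386/x86_64. */
#include <stdio.h>

int main(void)
{
#if defined(__i386__) || defined(__x86_64__)
	unsigned short int cw, cwtmp;

	__asm__ __volatile__ ("fnstcw %0" : "=m" (cw));     /* save control word */
	cwtmp = (cw & 0xf3ff) | 0x0400;                      /* select round-down */
	__asm__ __volatile__ ("fldcw %0" : : "m" (cwtmp));   /* install it */
	printf("x87 control word: 0x%04hx -> 0x%04hx\n", cw, cwtmp);
	__asm__ __volatile__ ("fldcw %0" : : "m" (cw));      /* restore original */
#endif
	return 0;
}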