author		Vineet Gupta <Vineet.Gupta1@synopsys.com>	2016-11-07 16:31:40 -0800
committer	Waldemar Brodkorb <wbx@uclibc-ng.org>		2016-11-13 13:44:35 +0100
commit		b985fa069187e4c5a7ee84213d9fbead2f219ce5
tree		1b34d11506b950e6e99978053bc458545773a321 /libc/sysdeps/linux/arc
parent		17ea4f9622a80cc8717beeefe1371ccbcd501fe3
NPTL/ARC: provide a kernel assisted atomic cmpxchg
For hardware configurations lacking LLOCK/SCOND (say ARC750),
use a syscall to atomically do the cmpxchg.
This is costly and painful, but really the only way out.
Note that the kernel only guarantees this to work in a UP configuration.
Reported-by: Avinash Patil <avinashp@quantenna.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
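
For context, the fallback added by this patch boils down to one trap into the kernel per cmpxchg. The following is a minimal stand-alone sketch of that idea, not code from the patch: it assumes kernel 4.9+ headers providing __NR_arc_usr_cmpxchg, the wrapper name cmpxchg_via_kernel is purely illustrative, and it goes through libc's syscall() wrapper instead of the raw trap used in bits/atomic.h below.

#include <unistd.h>
#include <sys/syscall.h>

/* Ask the kernel to perform: if (*mem == oldval) *mem = newval; atomically.
   The syscall returns the value *mem held before the attempt, so the
   exchange succeeded iff the return value equals oldval.  Note that the
   patch issues the trap directly rather than calling syscall(), since the
   wrapper would treat previous values in the errno range as failures. */
static int cmpxchg_via_kernel(int *mem, int oldval, int newval)
{
	return syscall(__NR_arc_usr_cmpxchg, mem, oldval, newval);
}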
Diffstat (limited to 'libc/sysdeps/linux/arc')
-rw-r--r--	libc/sysdeps/linux/arc/bits/atomic.h	37
1 file changed, 35 insertions, 2 deletions
diff --git a/libc/sysdeps/linux/arc/bits/atomic.h b/libc/sysdeps/linux/arc/bits/atomic.h
index 48f37879c..587860964 100644
--- a/libc/sysdeps/linux/arc/bits/atomic.h
+++ b/libc/sysdeps/linux/arc/bits/atomic.h
@@ -38,6 +38,11 @@ void __arc_link_error (void);
 #define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
   ({ __arc_link_error (); oldval; })
 
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ __arc_link_error (); oldval; })
+
+#ifdef __CONFIG_ARC_HAS_ATOMICS__
+
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
   ({                                                                \
 	__typeof(oldval) prev;                                      \
@@ -56,8 +61,36 @@ void __arc_link_error (void);
 	prev;                                                       \
   })
 
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __arc_link_error (); oldval; })
+#else
+
+#ifndef __NR_arc_usr_cmpxchg
+#error "__NR_arc_usr_cmpxchg missing: Please upgrade to kernel 4.9+ headers"
+#endif
+
+/* With lack of hardware assist, use kernel to do the atomic operation
+   This will only work in a UP configuration
+ */
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({                                                                \
+	/* opecode INTERNAL_SYSCALL as it lacks cc clobber */       \
+	register int __ret __asm__("r0") = (int)(mem);              \
+	register int __a1 __asm__("r1") = (int)(oldval);            \
+	register int __a2 __asm__("r2") = (int)(newval);            \
+	register int _sys_num __asm__("r8") = __NR_arc_usr_cmpxchg; \
+	                                                            \
+	__asm__ volatile (                                          \
+		ARC_TRAP_INSN                                       \
+		: "+r" (__ret)                                      \
+		: "r"(_sys_num), "r"(__ret), "r"(__a1), "r"(__a2)   \
+		: "memory", "cc");                                  \
+	                                                            \
+	/* syscall returns previous value */                        \
+	/* Z bit is set if cmpxchg succeeded (we don't use that yet) */ \
+	                                                            \
+	(__typeof(oldval)) __ret;                                   \
+  })
+
+#endif
 
 /* Store NEWVALUE in *MEM and return the old value.
    Atomic EX is present in all configurations
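
Either variant of the macro is normally reached through uClibc-ng's generic atomic wrappers rather than being invoked directly. A rough usage sketch follows; it assumes the internal include/atomic.h header (not an installed API), and the spinlock and take_lock() names are only illustrative.

#include <atomic.h>	/* uClibc-ng internal header */

static volatile int lock = 0;

static void take_lock(void)
{
	/* atomic_compare_and_exchange_val_acq(mem, newval, oldval) dispatches
	   to __arch_compare_and_exchange_val_32_acq() for a 4-byte object and
	   returns the value observed before the attempt, so 0 means the swap
	   from 0 to 1 succeeded and we own the lock. */
	while (atomic_compare_and_exchange_val_acq(&lock, 1, 0) != 0)
		continue;
}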