From 8d8b6ba629c8f510b152ec4bac03a7d7ac25ab2f Mon Sep 17 00:00:00 2001
From: Mike Frysinger <vapier@gentoo.org>
Date: Tue, 20 Feb 2007 15:25:12 +0000
Subject: call kernel helper to do spinlocks

---
 .../linuxthreads.old/sysdeps/bfin/pt-machine.h     | 35 +++++++++-------------
 1 file changed, 14 insertions(+), 21 deletions(-)
 (limited to 'libpthread')

diff --git a/libpthread/linuxthreads.old/sysdeps/bfin/pt-machine.h b/libpthread/linuxthreads.old/sysdeps/bfin/pt-machine.h
index fef16263e..c384cab06 100644
--- a/libpthread/linuxthreads.old/sysdeps/bfin/pt-machine.h
+++ b/libpthread/linuxthreads.old/sysdeps/bfin/pt-machine.h
@@ -26,32 +26,25 @@
 #endif
 
 extern long int testandset (int *spinlock);
-extern int __compare_and_swap (long *, long , long);
+#include <asm/unistd.h>
 
 /* Spinlock implementation; required.  */
+/* The semantics of the TESTSET instruction cannot be guaranteed.  We cannot
+   easily move all locks used by linux kernel to non-cacheable memory.
+   EXCPT 0x4 is used to trap into kernel to do the atomic testandset.
+   It's ugly.  But it's the only thing we can do now.
+   The handler of EXCPT 0x4 expects the address of the lock is passed through
+   R0.  And the result is returned by R0.  */
 PT_EI long int
 testandset (int *spinlock)
 {
-  if (*spinlock)
-    return 1;
-  else
-    {
-      *spinlock=1;
-      return 0;
-    }
-}
-
-#define HAS_COMPARE_AND_SWAP
-
-PT_EI int
-__compare_and_swap (long int *p, long int oldval, long int newval)
-{
-  if((*p ^ oldval) == 0) {
-    *p = newval;
-    return 1;
-  }
-  else
-    return 0;
+  long int res;
+
+  asm volatile ("R0 = %2; P0 = %4; EXCPT 0; %0 = R0;"
+                : "=d" (res), "=m" (*spinlock)
+                : "d" (spinlock), "m" (*spinlock),
+                  "ida" (__NR_bfin_spinlock)
+                :"R0", "P0", "cc");
+  return res;
 }
 
 #endif /* pt-machine.h */
-- 
cgit v1.2.3