author     Waldemar Brodkorb <wbx@openadk.org>    2015-02-20 12:24:26 -0600
committer  Waldemar Brodkorb <wbx@openadk.org>    2015-02-20 12:24:26 -0600
commit     d4389d613cc49f776fffe5ee0f9af854e9a93074 (patch)
tree       acb65e19f6482e2599e31003c9a811154580a23c /libc
parent     42e9e0a0406473a9b017342eaffc03c85f9bb51b (diff)
parent     409f14d9b5e47513d5c939120a33965997c8ceb2 (diff)
sync with uClibc
Diffstat (limited to 'libc')
-rw-r--r--  libc/string/arc/arcv2/memcpy.S                      | 236
-rw-r--r--  libc/string/arc/arcv2/memset.S                      |  85
-rw-r--r--  libc/string/arc/arcv2/strcmp.S                      |  83
-rw-r--r--  libc/string/arc/memcmp.S                            |  29
-rw-r--r--  libc/sysdeps/linux/arc/bits/syscalls.h              |  10
-rwxr-xr-x  libc/sysdeps/linux/arc/bits/uClibc_arch_features.h  |   7
-rw-r--r--  libc/sysdeps/linux/common/Makefile.in               |   1
-rw-r--r--  libc/sysdeps/linux/common/bits/sched.h              |  53
-rw-r--r--  libc/sysdeps/linux/common/eventfd.c                 |   2
-rw-r--r--  libc/sysdeps/linux/common/posix_fadvise.c           |  10
-rw-r--r--  libc/sysdeps/linux/common/posix_fadvise64.c         |  11
-rw-r--r--  libc/sysdeps/linux/common/setns.c                   |  15
-rw-r--r--  libc/sysdeps/linux/common/stubs.c                   |   4
-rw-r--r--  libc/sysdeps/linux/common/sync_file_range.c         |   6
-rw-r--r--  libc/sysdeps/linux/common/sys/eventfd.h             |   2
-rw-r--r--  libc/sysdeps/linux/sparc/bits/eventfd.h             |   2
16 files changed, 530 insertions(+), 26 deletions(-)
diff --git a/libc/string/arc/arcv2/memcpy.S b/libc/string/arc/arcv2/memcpy.S
new file mode 100644
index 000000000..7573daf51
--- /dev/null
+++ b/libc/string/arc/arcv2/memcpy.S
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
+ */
+
+#include <features.h>
+#include <sysdep.h>
+
+#ifdef __LITTLE_ENDIAN__
+# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >>
+# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM
+# define MERGE_2(RX,RY,IMM)
+# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF
+# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM
+#else
+# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >>
+# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; <<
+# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM
+# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
+#endif
+
+#ifdef __LL64__
+# define PREFETCH_READ(RX) prefetch [RX, 56]
+# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
+# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
+# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
+# define ZOLSHFT 5
+# define ZOLAND 0x1F
+#else
+# define PREFETCH_READ(RX) prefetch [RX, 28]
+# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
+# define LOADX(DST,RX) ld.ab DST, [RX, 4]
+# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
+# define ZOLSHFT 4
+# define ZOLAND 0xF
+#endif
+
+ENTRY(memcpy)
+ prefetch [r1] ; Prefetch the read location
+ prefetchw [r0] ; Prefetch the write location
+ mov.f 0, r2
+;;; if size is zero
+ jz.d [blink]
+ mov r3, r0 ; don't clobber ret val
+
+;;; if size <= 8
+ cmp r2, 8
+ bls.d @.Lsmallchunk
+ mov.f lp_count, r2
+
+ and.f r4, r0, 0x03
+ rsub lp_count, r4, 4
+ lpnz @.Laligndestination
+ ;; LOOP BEGIN
+ ldb.ab r5, [r1,1]
+ sub r2, r2, 1
+ stb.ab r5, [r3,1]
+.Laligndestination:
+
+;;; Check the alignment of the source
+ and.f r4, r1, 0x03
+ bnz.d @.Lsourceunaligned
+
+;;; CASE 0: Both source and destination are 32bit aligned
+;;; Convert len to Dwords, unfold x4
+ lsr.f lp_count, r2, ZOLSHFT
+ lpnz @.Lcopy32_64bytes
+ ;; LOOP START
+ LOADX (r6, r1)
+ PREFETCH_READ (r1)
+ PREFETCH_WRITE (r3)
+ LOADX (r8, r1)
+ LOADX (r10, r1)
+ LOADX (r4, r1)
+ STOREX (r6, r3)
+ STOREX (r8, r3)
+ STOREX (r10, r3)
+ STOREX (r4, r3)
+.Lcopy32_64bytes:
+
+ and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
+.Lsmallchunk:
+ lpnz @.Lcopyremainingbytes
+ ;; LOOP START
+ ldb.ab r5, [r1,1]
+ stb.ab r5, [r3,1]
+.Lcopyremainingbytes:
+
+ j [blink]
+;;; END CASE 0
+
+.Lsourceunaligned:
+ cmp r4, 2
+ beq.d @.LunalignedOffby2
+ sub r2, r2, 1
+
+ bhi.d @.LunalignedOffby3
+ ldb.ab r5, [r1, 1]
+
+;;; CASE 1: The source is unaligned, off by 1
+	;; Read 1 byte to reach 16-bit alignment, then 2 more
+	;; bytes to reach 32-bit alignment
+ ldh.ab r6, [r1, 2]
+ sub r2, r2, 2
+ ;; Convert to words, unfold x2
+ lsr.f lp_count, r2, 3
+ MERGE_1 (r6, r6, 8)
+ MERGE_2 (r5, r5, 24)
+ or r5, r5, r6
+
+ ;; Both src and dst are aligned
+ lpnz @.Lcopy8bytes_1
+ ;; LOOP START
+ ld.ab r6, [r1, 4]
+ prefetch [r1, 28] ;Prefetch the next read location
+ ld.ab r8, [r1,4]
+ prefetchw [r3, 32] ;Prefetch the next write location
+
+ SHIFT_1 (r7, r6, 24)
+ or r7, r7, r5
+ SHIFT_2 (r5, r6, 8)
+
+ SHIFT_1 (r9, r8, 24)
+ or r9, r9, r5
+ SHIFT_2 (r5, r8, 8)
+
+ st.ab r7, [r3, 4]
+ st.ab r9, [r3, 4]
+.Lcopy8bytes_1:
+
+ ;; Write back the remaining 16bits
+ EXTRACT_1 (r6, r5, 16)
+ sth.ab r6, [r3, 2]
+ ;; Write back the remaining 8bits
+ EXTRACT_2 (r5, r5, 16)
+ stb.ab r5, [r3, 1]
+
+ and.f lp_count, r2, 0x07 ;Last 8bytes
+ lpnz @.Lcopybytewise_1
+ ;; LOOP START
+ ldb.ab r6, [r1,1]
+ stb.ab r6, [r3,1]
+.Lcopybytewise_1:
+ j [blink]
+
+.LunalignedOffby2:
+;;; CASE 2: The source is unaligned, off by 2
+ ldh.ab r5, [r1, 2]
+ sub r2, r2, 1
+
+ ;; Both src and dst are aligned
+ ;; Convert to words, unfold x2
+ lsr.f lp_count, r2, 3
+#ifdef __BIG_ENDIAN__
+ asl.nz r5, r5, 16
+#endif
+ lpnz @.Lcopy8bytes_2
+ ;; LOOP START
+ ld.ab r6, [r1, 4]
+ prefetch [r1, 28] ;Prefetch the next read location
+ ld.ab r8, [r1,4]
+ prefetchw [r3, 32] ;Prefetch the next write location
+
+ SHIFT_1 (r7, r6, 16)
+ or r7, r7, r5
+ SHIFT_2 (r5, r6, 16)
+
+ SHIFT_1 (r9, r8, 16)
+ or r9, r9, r5
+ SHIFT_2 (r5, r8, 16)
+
+ st.ab r7, [r3, 4]
+ st.ab r9, [r3, 4]
+.Lcopy8bytes_2:
+
+#ifdef __BIG_ENDIAN__
+ lsr.nz r5, r5, 16
+#endif
+ sth.ab r5, [r3, 2]
+
+ and.f lp_count, r2, 0x07 ;Last 8bytes
+ lpnz @.Lcopybytewise_2
+ ;; LOOP START
+ ldb.ab r6, [r1,1]
+ stb.ab r6, [r3,1]
+.Lcopybytewise_2:
+ j [blink]
+
+.LunalignedOffby3:
+;;; CASE 3: The source is unaligned, off by 3
+;;; Hence, read 1 byte to achieve 32-bit alignment
+
+ ;; Both src and dst are aligned
+ ;; Convert to words, unfold x2
+ lsr.f lp_count, r2, 3
+#ifdef __BIG_ENDIAN__
+ asl.ne r5, r5, 24
+#endif
+ lpnz @.Lcopy8bytes_3
+ ;; LOOP START
+ ld.ab r6, [r1, 4]
+ prefetch [r1, 28] ;Prefetch the next read location
+ ld.ab r8, [r1,4]
+ prefetchw [r3, 32] ;Prefetch the next write location
+
+ SHIFT_1 (r7, r6, 8)
+ or r7, r7, r5
+ SHIFT_2 (r5, r6, 24)
+
+ SHIFT_1 (r9, r8, 8)
+ or r9, r9, r5
+ SHIFT_2 (r5, r8, 24)
+
+ st.ab r7, [r3, 4]
+ st.ab r9, [r3, 4]
+.Lcopy8bytes_3:
+
+#ifdef __BIG_ENDIAN__
+ lsr.nz r5, r5, 24
+#endif
+ stb.ab r5, [r3, 1]
+
+ and.f lp_count, r2, 0x07 ;Last 8bytes
+ lpnz @.Lcopybytewise_3
+ ;; LOOP START
+ ldb.ab r6, [r1,1]
+ stb.ab r6, [r3,1]
+.Lcopybytewise_3:
+ j [blink]
+
+END(memcpy)
+libc_hidden_def(memcpy)
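The three unaligned cases above share one idea: keep the leftover source bytes in a carry register (r5), do only aligned word loads, and splice each output word together with the SHIFT_1/SHIFT_2 macros plus an OR. A minimal C sketch of CASE 1 (little-endian, source off by one; the helper name and layout are illustrative, not uClibc code):

    #include <stdint.h>
    #include <stddef.h>

    /* dst is 32-bit aligned; src points one byte past a 32-bit
     * boundary, so src + 3 is aligned. Copies n whole words. */
    static void copy_src_off_by1(uint32_t *dst, const uint8_t *src, size_t n)
    {
        /* ldb + ldh + MERGE: pack the first 3 bytes into bits 0..23 */
        uint32_t carry = (uint32_t)src[0]
                       | (uint32_t)src[1] << 8
                       | (uint32_t)src[2] << 16;
        const uint32_t *s = (const uint32_t *)(src + 3); /* aligned now  */
        for (size_t i = 0; i < n; i++) {
            uint32_t w = s[i];           /* one aligned load per word    */
            dst[i] = (w << 24) | carry;  /* SHIFT_1 + or: splice output  */
            carry = w >> 8;              /* SHIFT_2: 3 bytes carry over  */
        }
    }

Cases 2 and 3 are the same loop with the shift counts changed to 16/16 and 8/24.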
diff --git a/libc/string/arc/arcv2/memset.S b/libc/string/arc/arcv2/memset.S
new file mode 100644
index 000000000..d076ad1cd
--- /dev/null
+++ b/libc/string/arc/arcv2/memset.S
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
+ */
+
+#include <features.h>
+#include <sysdep.h>
+
+#ifdef DONT_USE_PREALLOC
+#define PREWRITE(A,B) prefetchw [(A),(B)]
+#else
+#define PREWRITE(A,B) prealloc [(A),(B)]
+#endif
+
+ENTRY(memset)
+ prefetchw [r0] ; Prefetch the write location
+ mov.f 0, r2
+;;; if size is zero
+ jz.d [blink]
+ mov r3, r0 ; don't clobber ret val
+
+;;; if length <= 8
+ brls.d.nt r2, 8, .Lsmallchunk
+ mov.f lp_count,r2
+
+ and.f r4, r0, 0x03
+ rsub lp_count, r4, 4
+ lpnz @.Laligndestination
+ ;; LOOP BEGIN
+ stb.ab r1, [r3,1]
+ sub r2, r2, 1
+.Laligndestination:
+
+;;; Destination is aligned
+ and r1, r1, 0xFF
+ asl r4, r1, 8
+ or r4, r4, r1
+ asl r5, r4, 16
+ or r5, r5, r4
+ mov r4, r5
+
+ sub3 lp_count, r2, 8
+ cmp r2, 64
+ bmsk.hi r2, r2, 5
+ mov.ls lp_count, 0
+ add3.hi r2, r2, 8
+
+;;; Convert len to Dwords, unfold x8
+ lsr.f lp_count, lp_count, 6
+ lpnz @.Lset64bytes
+ ;; LOOP START
+ PREWRITE(r3, 64) ;Prefetch the next write location
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+.Lset64bytes:
+
+ lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
+ lpnz .Lset32bytes
+ ;; LOOP START
+ prefetchw [r3, 32] ;Prefetch the next write location
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+ std.ab r4, [r3, 8]
+.Lset32bytes:
+
+ and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
+.Lsmallchunk:
+ lpnz .Lcopy3bytes
+ ;; LOOP START
+ stb.ab r1, [r3, 1]
+.Lcopy3bytes:
+
+ j [blink]
+
+END(memset)
+libc_hidden_def(memset)
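The asl/or sequence after .Laligndestination replicates the fill byte into a full word (and, via r4/r5, a register pair for the 64-bit std.ab stores) so the inner loops can write 8 bytes at a time. The same doubling trick in C (sketch only):

    #include <stdint.h>

    /* Broadcast a byte across 32 bits: c -> 0x01010101 * c. */
    static uint32_t byte_pattern(uint8_t c)
    {
        uint32_t v = c;   /* and r1, r1, 0xFF               */
        v |= v << 8;      /* asl r4, r1, 8 ; or r4, r4, r1  */
        v |= v << 16;     /* asl r5, r4, 16; or r5, r5, r4  */
        return v;         /* e.g. 0xAB -> 0xABABABAB        */
    }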
diff --git a/libc/string/arc/arcv2/strcmp.S b/libc/string/arc/arcv2/strcmp.S
new file mode 100644
index 000000000..2e0e64a0c
--- /dev/null
+++ b/libc/string/arc/arcv2/strcmp.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
+ */
+
+#include <features.h>
+#include <sysdep.h>
+
+ENTRY(strcmp)
+ or r2, r0, r1
+ bmsk_s r2, r2, 1
+ brne r2, 0, @.Lcharloop
+
+;;; s1 and s2 are word aligned
+ ld.ab r2, [r0, 4]
+
+ mov_s r12, 0x01010101
+ ror r11, r12
+ .align 4
+.LwordLoop:
+ ld.ab r3, [r1, 4]
+ ;; Detect NULL char in str1
+ sub r4, r2, r12
+ ld.ab r5, [r0, 4]
+ bic r4, r4, r2
+ and r4, r4, r11
+ brne.d.nt r4, 0, .LfoundNULL
+ ;; Check if the read locations are the same
+ cmp r2, r3
+ beq.d .LwordLoop
+ mov.eq r2, r5
+
+	;; The words differ; compute the return value
+#ifdef __LITTLE_ENDIAN__
+ swape r3, r3
+ mov_s r0, 1
+ swape r2, r2
+#else
+ mov_s r0, 1
+#endif
+ cmp_s r2, r3
+ j_s.d [blink]
+ bset.lo r0, r0, 31
+
+ .align 4
+.LfoundNULL:
+#ifdef __BIG_ENDIAN__
+ swape r4, r4
+ swape r2, r2
+ swape r3, r3
+#endif
+ ;; Find null byte
+ ffs r0, r4
+ bmsk r2, r2, r0
+ bmsk r3, r3, r0
+ swape r2, r2
+ swape r3, r3
+ ;; make the return value
+ sub.f r0, r2, r3
+ mov.hi r0, 1
+ j_s.d [blink]
+ bset.lo r0, r0, 31
+
+ .align 4
+.Lcharloop:
+ ldb.ab r2, [r0, 1]
+ ldb.ab r3, [r1, 1]
+ nop
+ breq r2, 0, .Lcmpend
+ breq r2, r3, .Lcharloop
+
+ .align 4
+.Lcmpend:
+ j_s.d [blink]
+ sub r0, r2, r3
+END(strcmp)
+libc_hidden_def(strcmp)
+
+#ifndef __UCLIBC_HAS_LOCALE__
+strong_alias(strcmp,strcoll)
+libc_hidden_def(strcoll)
+#endif
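.LwordLoop relies on the classic branch-free zero-byte test: r12 holds 0x01010101, r11 its rotation 0x80808080, and a word contains a NUL byte exactly when (w - 0x01010101) & ~w & 0x80808080 is nonzero. The same predicate in C (sketch):

    #include <stdint.h>

    /* Nonzero iff some byte of w is 0x00. */
    static int has_zero_byte(uint32_t w)
    {
        return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
    }

For example, has_zero_byte(0x41004242) is nonzero because the second-highest byte is 0x00; that is the condition that sends the loop to .LfoundNULL.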
diff --git a/libc/string/arc/memcmp.S b/libc/string/arc/memcmp.S
index 4c0e39143..a60757e7a 100644
--- a/libc/string/arc/memcmp.S
+++ b/libc/string/arc/memcmp.S
@@ -24,14 +24,32 @@ ENTRY(memcmp)
ld r4,[r0,0]
ld r5,[r1,0]
lsr.f lp_count,r3,3
+#ifdef __HS__
+ /* In ARCv2 a branch can't be the last instruction in a zero overhead
+ * loop.
+ * So we move the branch to the start of the loop, duplicate it
+ * after the end, and set up r12 so that the branch isn't taken
+ * initially.
+ */
+ mov_s r12,WORD2
+ lpne .Loop_end
+ brne WORD2,r12,.Lodd
+ ld WORD2,[r0,4]
+#else
lpne .Loop_end
ld_s WORD2,[r0,4]
+#endif
ld_s r12,[r1,4]
brne r4,r5,.Leven
ld.a r4,[r0,8]
ld.a r5,[r1,8]
+#ifdef __HS__
+.Loop_end:
+ brne WORD2,r12,.Lodd
+#else
brne WORD2,r12,.Lodd
.Loop_end:
+#endif
asl_s SHIFT,SHIFT,3
bhs_s .Last_cmp
brne r4,r5,.Leven
@@ -99,14 +117,25 @@ ENTRY(memcmp)
ldb r4,[r0,0]
ldb r5,[r1,0]
lsr.f lp_count,r3
+#ifdef __HS__
+ mov r12,r3
lpne .Lbyte_end
+ brne r3,r12,.Lbyte_odd
+#else
+ lpne .Lbyte_end
+#endif
ldb_s r3,[r0,1]
ldb r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
ldb.a r5,[r1,2]
+#ifdef __HS__
+.Lbyte_end:
+ brne r3,r12,.Lbyte_odd
+#else
brne r3,r12,.Lbyte_odd
.Lbyte_end:
+#endif
bcc .Lbyte_even
brne r4,r5,.Lbyte_even
ldb_s r3,[r0,1]
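The #ifdef __HS__ blocks implement the rotation described in the comment in the first hunk: an ARCv2 zero-overhead loop may not end in a branch, so the exit branch moves to the head of the loop, with r12 primed so it is not taken on entry, and is duplicated once after the loop body. In C terms (hypothetical function, for illustration only):

    #include <stddef.h>
    #include <stdint.h>

    /* Return the index of the first differing word, or -1 if none. */
    static long first_diff(const uint32_t *a, const uint32_t *b, size_t n)
    {
        uint32_t w2 = 0, r12 = w2;  /* primed equal: head test not taken */
        size_t i = 0;
        while (i < n) {
            if (w2 != r12)          /* branch moved to the loop head     */
                return (long)i - 1;
            w2 = a[i];
            r12 = b[i];
            i++;
        }
        if (w2 != r12)              /* duplicated test after the loop    */
            return (long)i - 1;
        return -1;                  /* equal throughout                  */
    }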
diff --git a/libc/sysdeps/linux/arc/bits/syscalls.h b/libc/sysdeps/linux/arc/bits/syscalls.h
index 5da6aadb3..248ef7844 100644
--- a/libc/sysdeps/linux/arc/bits/syscalls.h
+++ b/libc/sysdeps/linux/arc/bits/syscalls.h
@@ -98,7 +98,11 @@ extern int __syscall_error (int);
* for syscall itself.
*-------------------------------------------------------------------------*/
-#define ARC_TRAP_INSN "trap0 \n\t"
+#ifdef __A7__
+#define ARC_TRAP_INSN "trap0 \n\t"
+#elif defined(__HS__)
+#define ARC_TRAP_INSN "trap_s 0 \n\t"
+#endif
#define INTERNAL_SYSCALL_NCS(nm, err, nr_args, args...) \
({ \
@@ -176,7 +180,11 @@ extern int __syscall_error (int);
#else
+#ifdef __A7__
#define ARC_TRAP_INSN trap0
+#elif defined(__HS__)
+#define ARC_TRAP_INSN trap_s 0
+#endif
#endif /* __ASSEMBLER__ */
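With this change ARC_TRAP_INSN expands to the trap instruction each family actually has: trap0 on ARC700 (__A7__) and trap_s 0 on ARCv2 HS cores. A rough sketch of how such a macro slots into an inline-asm syscall, not uClibc's actual INTERNAL_SYSCALL expansion, assuming the ARC Linux convention of the syscall number in r8 and the result in r0:

    /* Assumes ARC_TRAP_INSN from the header above is in scope. */
    static inline long arc_syscall1(long nr, long arg0)
    {
        register long r8 __asm__("r8") = nr;    /* syscall number */
        register long r0 __asm__("r0") = arg0;  /* arg0 / retval  */
        __asm__ volatile (ARC_TRAP_INSN
                          : "+r"(r0)
                          : "r"(r8)
                          : "memory");
        return r0;
    }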
diff --git a/libc/sysdeps/linux/arc/bits/uClibc_arch_features.h b/libc/sysdeps/linux/arc/bits/uClibc_arch_features.h
index 8af6eca4c..451575586 100755
--- a/libc/sysdeps/linux/arc/bits/uClibc_arch_features.h
+++ b/libc/sysdeps/linux/arc/bits/uClibc_arch_features.h
@@ -47,4 +47,11 @@
/* The default ';' is a comment on ARC. */
#define __UCLIBC_ASM_LINE_SEP__ `
+/* Does the target align 64-bit values in even register pairs? (32-bit arches only) */
+#if defined(__A7__)
+#undef __UCLIBC_SYSCALL_ALIGN_64BIT__
+#else
+#define __UCLIBC_SYSCALL_ALIGN_64BIT__
+#endif
+
#endif /* _BITS_UCLIBC_ARCH_FEATURES_H */
diff --git a/libc/sysdeps/linux/common/Makefile.in b/libc/sysdeps/linux/common/Makefile.in
index 9d41771e2..8ee956b6b 100644
--- a/libc/sysdeps/linux/common/Makefile.in
+++ b/libc/sysdeps/linux/common/Makefile.in
@@ -45,6 +45,7 @@ CSRC-$(UCLIBC_LINUX_SPECIFIC) += \
sendfile.c \
setfsgid.c \
setfsuid.c \
+ setns.c \
setresgid.c \
setresuid.c \
signalfd.c \
diff --git a/libc/sysdeps/linux/common/bits/sched.h b/libc/sysdeps/linux/common/bits/sched.h
index a5eb6ee55..9d05314f5 100644
--- a/libc/sysdeps/linux/common/bits/sched.h
+++ b/libc/sysdeps/linux/common/bits/sched.h
@@ -1,7 +1,6 @@
/* Definitions of constants and data structure for POSIX 1003.1b-1993
scheduling interface.
- Copyright (C) 1996-1999,2001-2003,2005,2006,2007,2008
- Free Software Foundation, Inc.
+ Copyright (C) 1996-2015 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -26,14 +25,17 @@
/* Scheduling algorithms. */
-#define SCHED_OTHER 0
-#define SCHED_FIFO 1
-#define SCHED_RR 2
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
#ifdef __USE_GNU
-# define SCHED_BATCH 3
+# define SCHED_BATCH 3
+# define SCHED_IDLE 5
+
+# define SCHED_RESET_ON_FORK 0x40000000
#endif
-#ifdef __USE_MISC
+#ifdef __USE_GNU
/* Cloning flags. */
# define CSIGNAL 0x000000ff /* Signal mask to be sent at exit. */
# define CLONE_VM 0x00000100 /* Set if VM shared between processes. */
@@ -58,7 +60,6 @@
force CLONE_PTRACE on this clone. */
# define CLONE_CHILD_SETTID 0x01000000 /* Store TID in userlevel buffer in
the child. */
-# define CLONE_STOPPED 0x02000000 /* Start in stopped state. */
# define CLONE_NEWUTS 0x04000000 /* New utsname group. */
# define CLONE_NEWIPC 0x08000000 /* New ipcs. */
# define CLONE_NEWUSER 0x10000000 /* New user namespace. */
@@ -75,7 +76,7 @@ struct sched_param
__BEGIN_DECLS
-#ifdef __USE_MISC
+#ifdef __USE_GNU
/* Clone current process. */
extern int clone (int (*__fn) (void *__arg), void *__child_stack,
int __flags, void *__arg, ...) __THROW;
@@ -85,8 +86,12 @@ extern int unshare (int __flags) __THROW;
/* Get index of currently used CPU. */
extern int sched_getcpu (void) __THROW;
+
+/* Switch process to namespace of type NSTYPE indicated by FD. */
+extern int setns (int __fd, int __nstype) __THROW;
#endif
+
__END_DECLS
#endif /* need schedparam */
@@ -124,7 +129,11 @@ typedef struct
} cpu_set_t;
/* Access functions for CPU masks. */
-# define __CPU_ZERO_S(setsize, cpusetp) \
+# if __GNUC_PREREQ (2, 91)
+# define __CPU_ZERO_S(setsize, cpusetp) \
+ do __builtin_memset (cpusetp, '\0', setsize); while (0)
+# else
+# define __CPU_ZERO_S(setsize, cpusetp) \
do { \
size_t __i; \
size_t __imax = (setsize) / sizeof (__cpu_mask); \
@@ -132,47 +141,53 @@ typedef struct
for (__i = 0; __i < __imax; ++__i) \
__bits[__i] = 0; \
} while (0)
+# endif
# define __CPU_SET_S(cpu, setsize, cpusetp) \
(__extension__ \
({ size_t __cpu = (cpu); \
- __cpu < 8 * (setsize) \
+ __cpu / 8 < (setsize) \
? (((__cpu_mask *) ((cpusetp)->__bits))[__CPUELT (__cpu)] \
|= __CPUMASK (__cpu)) \
: 0; }))
# define __CPU_CLR_S(cpu, setsize, cpusetp) \
(__extension__ \
({ size_t __cpu = (cpu); \
- __cpu < 8 * (setsize) \
+ __cpu / 8 < (setsize) \
? (((__cpu_mask *) ((cpusetp)->__bits))[__CPUELT (__cpu)] \
&= ~__CPUMASK (__cpu)) \
: 0; }))
# define __CPU_ISSET_S(cpu, setsize, cpusetp) \
(__extension__ \
({ size_t __cpu = (cpu); \
- __cpu < 8 * (setsize) \
- ? ((((__cpu_mask *) ((cpusetp)->__bits))[__CPUELT (__cpu)] \
+ __cpu / 8 < (setsize) \
+ ? ((((const __cpu_mask *) ((cpusetp)->__bits))[__CPUELT (__cpu)] \
& __CPUMASK (__cpu))) != 0 \
: 0; }))
# define __CPU_COUNT_S(setsize, cpusetp) \
__sched_cpucount (setsize, cpusetp)
-# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+# if __GNUC_PREREQ (2, 91)
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__builtin_memcmp (cpusetp1, cpusetp2, setsize) == 0)
+# else
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
(__extension__ \
- ({ __cpu_mask *__arr1 = (cpusetp1)->__bits; \
- __cpu_mask *__arr2 = (cpusetp2)->__bits; \
+ ({ const __cpu_mask *__arr1 = (cpusetp1)->__bits; \
+ const __cpu_mask *__arr2 = (cpusetp2)->__bits; \
size_t __imax = (setsize) / sizeof (__cpu_mask); \
size_t __i; \
for (__i = 0; __i < __imax; ++__i) \
if (__arr1[__i] != __arr2[__i]) \
break; \
__i == __imax; }))
+# endif
# define __CPU_OP_S(setsize, destset, srcset1, srcset2, op) \
(__extension__ \
({ cpu_set_t *__dest = (destset); \
- __cpu_mask *__arr1 = (srcset1)->__bits; \
- __cpu_mask *__arr2 = (srcset2)->__bits; \
+ const __cpu_mask *__arr1 = (srcset1)->__bits; \
+ const __cpu_mask *__arr2 = (srcset2)->__bits; \
size_t __imax = (setsize) / sizeof (__cpu_mask); \
size_t __i; \
for (__i = 0; __i < __imax; ++__i) \
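Two notes on the macro changes: __CPU_ZERO_S and __CPU_EQUAL_S now use the GCC builtins when available, and the bounds test becomes __cpu / 8 < setsize, which cannot wrap the way 8 * (setsize) could for a very large setsize. Typical use of these accessors through the public macros (sketch; error handling elided):

    #define _GNU_SOURCE
    #include <sched.h>

    /* Pin the calling process/thread to CPU 0. */
    static int pin_to_cpu0(void)
    {
        cpu_set_t set;
        CPU_ZERO(&set);    /* __CPU_ZERO_S: builtin memset on newer GCC */
        CPU_SET(0, &set);  /* bounds-checked via __cpu / 8 < setsize    */
        return sched_setaffinity(0, sizeof(set), &set);
    }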
diff --git a/libc/sysdeps/linux/common/eventfd.c b/libc/sysdeps/linux/common/eventfd.c
index 96597ab33..500b0c002 100644
--- a/libc/sysdeps/linux/common/eventfd.c
+++ b/libc/sysdeps/linux/common/eventfd.c
@@ -15,7 +15,7 @@
* eventfd()
*/
#if defined __NR_eventfd || defined __NR_eventfd2
-int eventfd (int count, int flags)
+int eventfd (unsigned int count, int flags)
{
#if defined __NR_eventfd2
return INLINE_SYSCALL (eventfd2, 2, count, flags);
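The prototype fix makes count unsigned, matching the kernel's eventfd counter. Minimal usage against the corrected signature (sketch):

    #include <sys/eventfd.h>
    #include <unistd.h>

    static int eventfd_roundtrip(void)
    {
        int fd = eventfd(0, EFD_CLOEXEC);  /* unsigned initial count  */
        if (fd < 0)
            return -1;
        eventfd_write(fd, 1);              /* counter += 1            */
        eventfd_t v;
        eventfd_read(fd, &v);              /* v == 1; counter resets  */
        close(fd);
        return v == 1 ? 0 : -1;
    }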
diff --git a/libc/sysdeps/linux/common/posix_fadvise.c b/libc/sysdeps/linux/common/posix_fadvise.c
index 14bbeeea1..74d8409c0 100644
--- a/libc/sysdeps/linux/common/posix_fadvise.c
+++ b/libc/sysdeps/linux/common/posix_fadvise.c
@@ -41,9 +41,17 @@ int posix_fadvise(int fd, off_t offset, off_t len, int advice)
# if __WORDSIZE == 64
ret = INTERNAL_SYSCALL(fadvise64_64, err, 4, fd, offset, len, advice);
# else
-# if defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) || defined(__arm__)
+# if defined (__arm__) || \
+ (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && (defined(__powerpc__) || defined(__xtensa__)))
+ /* arch with 64-bit data in even reg alignment #1: [powerpc/xtensa]
+ * custom syscall handler (rearranges @advice to avoid register hole punch) */
ret = INTERNAL_SYSCALL(fadvise64_64, err, 6, fd, advice,
OFF_HI_LO (offset), OFF_HI_LO (len));
+# elif defined(__UCLIBC_SYSCALL_ALIGN_64BIT__)
+ /* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
+ * stock syscall handler in kernel (reg hole punched) */
+ ret = INTERNAL_SYSCALL(fadvise64_64, err, 7, fd, 0,
+ OFF_HI_LO (offset), OFF_HI_LO (len), advice);
# else
ret = INTERNAL_SYSCALL(fadvise64_64, err, 6, fd,
OFF_HI_LO (offset), OFF_HI_LO (len), advice);
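The three branches differ only in where the 64-bit halves and advice land. On ABIs that put 64-bit values in even/odd register pairs, fd in the first argument register leaves a hole before the first 64-bit half: powerpc/xtensa kernels ship a custom handler that moves advice into that slot, while ARCv2 keeps the stock handler and passes an explicit 0 to fill it, hence the 7-argument call. The split itself is just a shift and a truncation (sketch with hypothetical helpers; OFF_HI_LO emits the halves in endian-dependent order):

    #include <stdint.h>

    static void split64(uint64_t v, uint32_t *hi, uint32_t *lo)
    {
        *hi = (uint32_t)(v >> 32);  /* high register half */
        *lo = (uint32_t)v;          /* low register half  */
    }
    /* ARCv2-style argument layout (the literal 0 fills the
       register-pair alignment hole after fd):
       fd, 0, hi(off), lo(off), hi(len), lo(len), advice */

posix_fadvise64.c and sync_file_range.c below apply the same layout.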
diff --git a/libc/sysdeps/linux/common/posix_fadvise64.c b/libc/sysdeps/linux/common/posix_fadvise64.c
index 5d8989121..37fb269ca 100644
--- a/libc/sysdeps/linux/common/posix_fadvise64.c
+++ b/libc/sysdeps/linux/common/posix_fadvise64.c
@@ -24,9 +24,18 @@ int posix_fadvise64(int fd, off64_t offset, off64_t len, int advice)
{
INTERNAL_SYSCALL_DECL (err);
/* ARM has always been funky. */
-# if defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) || defined(__arm__)
+#if defined (__arm__) || \
+ (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && (defined(__powerpc__) || defined(__xtensa__)))
+ /* arch with 64-bit data in even reg alignment #1: [powerpc/xtensa]
+ * custom syscall handler (rearranges @advice to avoid register hole punch) */
int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd, advice,
OFF64_HI_LO (offset), OFF64_HI_LO (len));
+#elif defined(__UCLIBC_SYSCALL_ALIGN_64BIT__)
+ /* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
+ * stock syscall handler in kernel (reg hole punched) */
+ int ret = INTERNAL_SYSCALL (fadvise64_64, err, 7, fd, 0,
+ OFF64_HI_LO (offset), OFF64_HI_LO (len),
+ advice);
# else
int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd,
OFF64_HI_LO (offset), OFF64_HI_LO (len),
diff --git a/libc/sysdeps/linux/common/setns.c b/libc/sysdeps/linux/common/setns.c
new file mode 100644
index 000000000..a697720b9
--- /dev/null
+++ b/libc/sysdeps/linux/common/setns.c
@@ -0,0 +1,15 @@
+/* vi: set sw=4 ts=4: */
+/*
+ * setns() for uClibc
+ *
+ * Copyright (C) 2015 Bernhard Reutner-Fischer <uclibc@uclibc.org>
+ *
+ * Licensed under the LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
+ */
+
+#include <sys/syscall.h>
+#include <sched.h>
+
+#ifdef __NR_setns
+_syscall2(int, setns, int, fd, int, nstype)
+#endif
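setns() attaches the calling process to an existing namespace referenced by an open file descriptor. Typical use of the new wrapper (sketch; path and namespace type are illustrative):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Join the network namespace behind e.g. "/proc/<pid>/ns/net". */
    static int join_net_ns(const char *path)
    {
        int fd = open(path, O_RDONLY);
        if (fd < 0)
            return -1;
        int ret = setns(fd, CLONE_NEWNET);  /* 0 on success, -1 + errno */
        close(fd);
        return ret;
    }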
diff --git a/libc/sysdeps/linux/common/stubs.c b/libc/sysdeps/linux/common/stubs.c
index 57c4664aa..2c50307aa 100644
--- a/libc/sysdeps/linux/common/stubs.c
+++ b/libc/sysdeps/linux/common/stubs.c
@@ -346,6 +346,10 @@ make_stub(setfsgid)
make_stub(setfsuid)
#endif
+#if !defined __NR_setns && defined __UCLIBC_LINUX_SPECIFIC__
+make_stub(setns)
+#endif
+
#if !defined __NR_setresgid32 && !defined __NR_setresgid && defined __UCLIBC_LINUX_SPECIFIC__
make_stub(setresgid)
#endif
diff --git a/libc/sysdeps/linux/common/sync_file_range.c b/libc/sysdeps/linux/common/sync_file_range.c
index 6cd7e94d6..db797de62 100644
--- a/libc/sysdeps/linux/common/sync_file_range.c
+++ b/libc/sysdeps/linux/common/sync_file_range.c
@@ -24,7 +24,11 @@ static int __NC(sync_file_range)(int fd, off64_t offset, off64_t nbytes, unsigne
{
# if defined __powerpc__ && __WORDSIZE == 64
return INLINE_SYSCALL(sync_file_range, 4, fd, flags, offset, nbytes);
-# elif defined __mips__ && _MIPS_SIM == _ABIO32
+# elif (defined __mips__ && _MIPS_SIM == _ABIO32) || \
+ (defined(__UCLIBC_SYSCALL_ALIGN_64BIT__) && !(defined(__powerpc__) || defined(__xtensa__)))
+ /* arch with 64-bit data in even reg alignment #2: [arcv2/others-in-future]
+ * stock syscall handler in kernel (reg hole punched)
+ * see libc/sysdeps/linux/common/posix_fadvise.c for more details */
return INLINE_SYSCALL(sync_file_range, 7, fd, 0,
OFF64_HI_LO(offset), OFF64_HI_LO(nbytes), flags);
# elif defined __NR_sync_file_range2
diff --git a/libc/sysdeps/linux/common/sys/eventfd.h b/libc/sysdeps/linux/common/sys/eventfd.h
index 91b265b2c..a47b5fecf 100644
--- a/libc/sysdeps/linux/common/sys/eventfd.h
+++ b/libc/sysdeps/linux/common/sys/eventfd.h
@@ -31,7 +31,7 @@ __BEGIN_DECLS
/* Return file descriptor for generic event channel. Set initial
value to COUNT. */
-extern int eventfd (int __count, int __flags) __THROW;
+extern int eventfd (unsigned int __count, int __flags) __THROW;
/* Read event counter and possibly wait for events. */
extern int eventfd_read (int __fd, eventfd_t *__value);
diff --git a/libc/sysdeps/linux/sparc/bits/eventfd.h b/libc/sysdeps/linux/sparc/bits/eventfd.h
index bed9f093b..e348cc6fb 100644
--- a/libc/sysdeps/linux/sparc/bits/eventfd.h
+++ b/libc/sysdeps/linux/sparc/bits/eventfd.h
@@ -22,7 +22,7 @@
/* Flags for eventfd. */
enum
{
- EFD_SEMAPHORE = 1,
+ EFD_SEMAPHORE = 0x000001,
#define EFD_SEMAPHORE EFD_SEMAPHORE
EFD_CLOEXEC = 0x400000,
#define EFD_CLOEXEC EFD_CLOEXEC