summary refs log tree commit diff
diff options
context:
space:
mode:
author Manuel Novoa III <mjn3@codepoet.org> 2001-06-27 17:59:13 +0000
committer Manuel Novoa III <mjn3@codepoet.org> 2001-06-27 17:59:13 +0000
commit a79cec095d20bca36305211ce36b6f92a96b1245 (patch)
tree 542b1208ed72737d93b5205fcbd87708145aab27
parent b52e867b793000a08d2eea36daa182c272d8827f (diff)
Make these PIC and _LIBC_REENTRANT compatible.
-rw-r--r-- libc/sysdeps/linux/i386/__uClibc_syscall.S 50
-rw-r--r-- libc/sysdeps/linux/i386/vfork.S 47
2 files changed, 89 insertions(+), 8 deletions(-)
diff --git a/libc/sysdeps/linux/i386/__uClibc_syscall.S b/libc/sysdeps/linux/i386/__uClibc_syscall.S
index cc785efd7..b79d2666e 100644
--- a/libc/sysdeps/linux/i386/__uClibc_syscall.S
+++ b/libc/sysdeps/linux/i386/__uClibc_syscall.S
@@ -1,3 +1,21 @@
+/*
+ * June 27, 2001 Manuel Novoa III
+ *
+ * This is a heavily modified version of gcc's output for the syscall5 macro.
+ * The idea (originally from dietlibc) is that all syscall functions simply
+ * set the syscall number in %al (since <= 255) and then jump here. All the
+ * common work is done by __uClibc_syscall, saving a fair amount of generated
+ * code where a number of syscalls are used. The (potential) cost is some
+ * unnecessary pushes, pops, and movs but the execution time penalty should
+ * be relatively small compared to the cost of the syscall itself.
+ *
+ * WARNING: If the startup code for uClibc changes, I suppose it is possible
+ * that this code might try to access memory under the bottom of
+ * the stack.
+ * WARNING: This will need to be modified if the number of syscalls ever
+ * exceeds 255. So will the associated syscall macros.
+ */
+
.text
.align 4
.globl __uClibc_syscall
@@ -16,15 +34,39 @@ __uClibc_syscall:
int $0x80
#NO_APP
cmpl $-4095,%eax
- jbe .L5
+ jbe .Ldone
+
+#ifdef PIC
+ call Lhere
+Lhere:
+ popl %ebx
+ addl $_GLOBAL_OFFSET_TABLE_+[.-Lhere],%ebx
+ negl %eax
+ movl %eax,%ecx
+#ifdef _LIBC_REENTRANT
+ call __errno_location@PLT
+#else
+ movl errno@GOT(%ebx),%eax
+#endif /* _LIBC_REENTRANT */
+ movl %ecx,(%eax)
+#else
negl %eax
+#ifdef _LIBC_REENTRANT
+ movl %eax,%ecx
+ call __errno_location
+ movl %ecx,(%eax)
+#else
movl %eax,errno
+#endif /* _LIBC_REENTRANT */
+
+#endif /* PIC */
+
movl $-1,%eax
.p2align 4,,7
-.L5:
+.Ldone:
popl %ebx
popl %esi
popl %edi
ret
-.Lfe1:
- .size __uClibc_syscall,.Lfe1-__uClibc_syscall
+.Lsize:
+ .size __uClibc_syscall,.Lsize-__uClibc_syscall
diff --git a/libc/sysdeps/linux/i386/vfork.S b/libc/sysdeps/linux/i386/vfork.S
index 5775daee3..10e95b170 100644
--- a/libc/sysdeps/linux/i386/vfork.S
+++ b/libc/sysdeps/linux/i386/vfork.S
@@ -1,3 +1,10 @@
+/*
+ * June 27, 2001 Manuel Novoa III
+ *
+ * Modified to (hopefully) be PIC and REENTRANT safe.
+ *
+ */
+
.text
.align 4
.globl vfork
@@ -5,18 +12,50 @@
vfork:
popl %ecx
movl $190,%eax
+#ifdef PIC
+ pushl %ebx
+#endif
#APP
int $0x80
#NO_APP
+#ifdef PIC
+ popl %ebx
+#endif
cmpl $-4095,%eax
- jae .L5
+ jae .Lerror
jmp *%ecx
.p2align 4,,7
-.L5:
+.Lerror:
pushl %ecx
+
+#ifdef PIC
+ pushl %ebx
+ call .Lhere
+.Lhere:
+ popl %ebx
+ addl $_GLOBAL_OFFSET_TABLE_+[.-.Lhere],%ebx
negl %eax
+ movl %eax,%ecx
+#ifdef _LIBC_REENTRANT
+ call __errno_location@PLT
+#else
+ movl errno@GOT(%ebx),%eax
+#endif /* _LIBC_REENTRANT */
+ movl %ecx,(%eax)
+ popl %ebx
+#else
+ negl %eax
+#ifdef _LIBC_REENTRANT
+ movl %eax,%ecx
+ call __errno_location
+ movl %ecx,(%eax)
+#else
movl %eax,errno
+#endif /* _LIBC_REENTRANT */
+
+#endif /* PIC */
+
movl $-1,%eax
ret
-.Lfe1:
- .size vfork,.Lfe1-vfork
+.Lsize:
+ .size vfork,.Lsize-vfork