-rw-r--r-- | libc/sysdeps/linux/i386/__uClibc_syscall.S | 50
-rw-r--r-- | libc/sysdeps/linux/i386/vfork.S            | 47
2 files changed, 89 insertions, 8 deletions
diff --git a/libc/sysdeps/linux/i386/__uClibc_syscall.S b/libc/sysdeps/linux/i386/__uClibc_syscall.S
index cc785efd7..b79d2666e 100644
--- a/libc/sysdeps/linux/i386/__uClibc_syscall.S
+++ b/libc/sysdeps/linux/i386/__uClibc_syscall.S
@@ -1,3 +1,21 @@
+/*
+ * June 27, 2001             Manuel Novoa III
+ *
+ * This is a heavily modified version of gcc's output for the syscall5 macro.
+ * The idea (originally from dietlibc) is that all syscall functions simply
+ * set the syscall number in %al (since <= 255) and then jump here.  All the
+ * common work is done by __uClibc_syscall, saving a fair amount of generated
+ * code where a number of syscalls are used.  The (potential) cost is some
+ * unnecessary pushes, pops, and movs but the execution time penalty should
+ * be relatively small compared to the cost of the syscall itself.
+ *
+ * WARNING: If the startup code for uClibc changes, I suppose it is possible
+ *          that this code might try to access memory under the bottom of
+ *          the stack.
+ * WARNING: This will need to be modified if the number of syscalls ever
+ *          exceeds 255.  So will the associated syscall macros.
+ */
+
 .text
 .align 4
 .globl __uClibc_syscall
@@ -16,15 +34,39 @@ __uClibc_syscall:
 	int $0x80
 #NO_APP
 	cmpl $-4095,%eax
-	jbe .L5
+	jbe .Ldone
+
+#ifdef PIC
+	call Lhere
+Lhere:
+	popl %ebx
+	addl $_GLOBAL_OFFSET_TABLE_+[.-Lhere],%ebx
+	negl %eax
+	movl %eax,%ecx
+#ifdef _LIBC_REENTRANT
+	call __errno_location@PLT
+#else
+	movl errno@GOT(%ebx),%eax
+#endif /* _LIBC_REENTRANT */
+	movl %ecx,(%eax)
+#else
 	negl %eax
+#ifdef _LIBC_REENTRANT
+	movl %eax,%ecx
+	call __errno_location
+	movl %ecx,(%eax)
+#else
 	movl %eax,errno
+#endif /* _LIBC_REENTRANT */
+
+#endif /* PIC */
+
 	movl $-1,%eax
 	.p2align 4,,7
-.L5:
+.Ldone:
 	popl %ebx
 	popl %esi
 	popl %edi
 	ret
-.Lfe1:
-	.size __uClibc_syscall,.Lfe1-__uClibc_syscall
+.Lsize:
+	.size __uClibc_syscall,.Lsize-__uClibc_syscall
diff --git a/libc/sysdeps/linux/i386/vfork.S b/libc/sysdeps/linux/i386/vfork.S
index 5775daee3..10e95b170 100644
--- a/libc/sysdeps/linux/i386/vfork.S
+++ b/libc/sysdeps/linux/i386/vfork.S
@@ -1,3 +1,10 @@
+/*
+ * June 27, 2001             Manuel Novoa III
+ *
+ * Modified to (hopefully) be PIC and REENTRANT safe.
+ *
+ */
+
 .text
 .align 4
 .globl vfork
@@ -5,18 +12,50 @@ vfork:
 	popl %ecx
 	movl $190,%eax
+#ifdef PIC
+	pushl %ebx
+#endif
 #APP
 	int $0x80
 #NO_APP
+#ifdef PIC
+	popl %ebx
+#endif
 	cmpl $-4095,%eax
-	jae .L5
+	jae .Lerror
 	jmp *%ecx
 	.p2align 4,,7
-.L5:
+.Lerror:
 	pushl %ecx
+
+#ifdef PIC
+	pushl %ebx
+	call .Lhere
+.Lhere:
+	popl %ebx
+	addl $_GLOBAL_OFFSET_TABLE_+[.-.Lhere],%ebx
 	negl %eax
+	movl %eax,%ecx
+#ifdef _LIBC_REENTRANT
+	call __errno_location@PLT
+#else
+	movl errno@GOT(%ebx),%eax
+#endif /* _LIBC_REENTRANT */
+	movl %ecx,(%eax)
+	popl %ebx
+#else
+	negl %eax
+#ifdef _LIBC_REENTRANT
+	movl %eax,%ecx
+	call __errno_location
+	movl %ecx,(%eax)
+#else
 	movl %eax,errno
+#endif /* _LIBC_REENTRANT */
+
+#endif /* PIC */
+
 	movl $-1,%eax
 	ret
-.Lfe1:
-	.size vfork,.Lfe1-vfork
+.Lsize:
+	.size vfork,.Lsize-vfork
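
Note: the new header comment describes the calling convention (syscall number placed in %al, then a jump to the shared tail) but the patch itself contains no example caller. As a rough illustration only, a stub produced by the associated syscall macros might look like the sketch below; the choice of getpid and the literal syscall number 20 are assumptions for illustration and are not taken from this commit:

	.text
	.align 4
	.globl getpid
	.type getpid,@function
getpid:
	movb $20,%al            /* assumed: __NR_getpid == 20 on Linux/i386; must be <= 255 to fit in %al */
	jmp __uClibc_syscall    /* shared tail issues int $0x80 and performs the errno handling shown above */
	.size getpid,.-getpid

Because only %al carries the syscall number, this scheme works only while every syscall number fits in one byte, which is exactly the limitation the second WARNING in the new header comment calls out.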