#ifndef _BITS_SYSCALLS_H
#define _BITS_SYSCALLS_H
#ifndef _SYSCALL_H
# error "Never use <bits/syscalls.h> directly; include <sys/syscall.h> instead."
#endif
/*
 * Some of the sneaky macros in the code were taken from
 * glibc-2.2.5/sysdeps/unix/sysv/linux/i386/sysdep.h
 */
#ifndef __ASSEMBLER__
#include <errno.h>
#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
(__extension__ \
 ({ \
	register unsigned int resultvar; \
	__asm__ __volatile__ ( \
		LOADARGS_##nr \
		"movl %1, %%eax\n\t" \
		"int $0x80\n\t" \
		RESTOREARGS_##nr \
		: "=a" (resultvar) \
		: "g" (name) ASMFMT_##nr(args) : "memory", "cc" \
	); \
	(int) resultvar; \
 }) \
)
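/* Illustrative sketch only, not part of this header: a direct use of
 * INTERNAL_SYSCALL_NCS. The wrapper names my_getpid/my_close and the local
 * 'err' variable are hypothetical; err is unused by this definition, and
 * real uClibc syscall wrappers add errno handling around the raw (int)
 * return value. __NR_getpid and __NR_close are assumed to come from
 * <sys/syscall.h>. */
#if 0
#include <sys/syscall.h>

static int my_getpid(void)
{
	int err;
	/* Zero-argument call: LOADARGS_0/ASMFMT_0() expand to nothing,
	 * so this is essentially "movl $__NR_getpid, %eax; int $0x80". */
	return INTERNAL_SYSCALL_NCS(__NR_getpid, err, 0);
}

static int my_close(int fd)
{
	int err;
	/* One-argument call: fd is passed via the ASMFMT_1 constraint and
	 * moved into ebx by LOADARGS_1 before "int $0x80". */
	return INTERNAL_SYSCALL_NCS(__NR_close, err, 1, fd);
}
#endif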
#if 1 /* defined __PIC__ || defined __pic__ */
/* This code avoids pushing/popping ebx as much as possible.
 * I think the main reason was that older GCCs had problems
 * with proper saving/restoring of ebx if the "b" constraint was used,
 * which was breaking -fPIC code really badly.
 * At least gcc 4.2.x seems to not need these tricks anymore,
 * but this code is still useful because it often avoids
 * using the stack for saving ebx.
 * Keeping it unconditionally enabled for now.
 */
/* We need some help from the assembler to generate optimal code.
 * We define some macros here which will be used later
 * (see the expansion sketch after the __asm__ block below). */
/* FIXME: drop these b* macros! */
__asm__ (
	".L__X'%ebx = 1\n\t"
	".L__X'%ecx = 2\n\t"
	".L__X'%edx = 2\n\t"
	".L__X'%eax = 3\n\t"
	".L__X'%esi = 3\n\t"
	".L__X'%edi = 3\n\t"
	".L__X'%ebp = 3\n\t"
	".L__X'%esp = 3\n\t"
	/* Loading param #1 (ebx) is done by loading it into
	 * another register, and then performing bpushl+bmovl,
	 * since we must preserve ebx */
	".macro bpushl name reg\n\t"
	".if 1 - \\name\n\t"		/* if reg!=ebx... */
	".if 2 - \\name\n\t"		/* if reg can't be clobbered... */
	"pushl %ebx\n\t"		/* save ebx on stack */
	".else\n\t"
	"xchgl \\reg, %ebx\n\t"		/* else save ebx in reg, and load reg to ebx */
	".endif\n\t"
	".endif\n\t"
	".endm\n\t"
	".macro bmovl name reg\n\t"
	".if 1 - \\name\n\t"
	".if 2 - \\name\n\t"		/* if reg can't be clobbered... */
	"movl \\reg, %ebx\n\t"		/* load reg to ebx */
	".endif\n\t"
	".endif\n\t"
	".endm\n\t"
	".macro bpopl name reg\n\t"
	".if 1 - \\name\n\t"
	".if 2 - \\name\n\t"		/* if reg can't be clobbered... */
	"popl %ebx\n\t"			/* restore ebx from stack */
	".else\n\t"
	"xchgl \\reg, %ebx\n\t"		/* else restore ebx from reg */
	".endif\n\t"
	".endif\n\t"
	".endm\n\t"
);
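/* Expansion sketch (derived from the .L__X values and .if arithmetic above,
 * for reference only): for a register whose .L__X value is 2 (ecx or edx),
 * bpushl and bpopl each become a single "xchgl reg, %ebx" and bmovl emits
 * nothing, so the old ebx is parked in the argument register itself. For a
 * value-3 register (eax/esi/edi/ebp/esp), bpushl becomes "pushl %ebx",
 * bmovl becomes "movl reg, %ebx" and bpopl becomes "popl %ebx", i.e. ebx is
 * saved on the stack. For ebx itself (value 1) all three emit nothing. */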
#define LOADARGS_0
#define LOADARGS_1 "bpushl .L__X'%k2, %k2\n\t" "bmovl .L__X'%k2, %k2\n\t"
#define LOADARGS_2 LOADARGS_1
#define LOADARGS_3 LOADARGS_1
#define LOADARGS_4 LOADARGS_1
#define LOADARGS_5 LOADARGS_1
#define LOADARGS_6 LOADARGS_1 "push %%ebp\n\t" "movl %7, %%ebp\n\t"
#define RESTOREARGS_0
#define RESTOREARGS_1 "bpopl .L__X'%k2, %k2\n\t"
#define RESTOREARGS_2 RESTOREARGS_1
#define RESTOREARGS_3 RESTOREARGS_1
#define RESTOREARGS_4 RESTOREARGS_1
#define RESTOREARGS_5 RESTOREARGS_1
#define RESTOREARGS_6 "pop %%ebp\n\t" RESTOREARGS_1
#define ASMFMT_0()
/* "acdSD" constraint would work too, but "SD" would use esi/edi and cause
* them to be pushed/popped by compiler, "a" would use eax and cause ebx
* to be saved/restored on stack, not in register. Narrowing choice down
* to "ecx or edx" results in smaller and faster code: */
#define ASMFMT_1(arg1) \
, "cd" (arg1)
/* Can use "adSD" constraint here: */
#define ASMFMT_2(arg1, arg2) \
, "d" (arg1), "c" (arg2)
/* Can use "aSD" constraint here: */
#define ASMFMT_3(arg1, arg2, arg3) \
, "a" (arg1), "c" (arg2), "d" (arg3)
/* Can use "aD" constraint here: */
#define ASMFMT_4(arg1, arg2, arg3, arg4) \
, "a" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
#define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
, "a" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#define ASMFMT_6(arg1, arg2, arg3, arg4, arg5, arg6) \
, "a" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5), "g" (arg6)
#else /* !PIC */
/* Simpler code which just uses the "b" constraint to load ebx.
 * Seems to work with gcc 4.2.x, and generates slightly smaller,
 * but slightly slower code. Example (time syscall):
 *
 * - 8b 4c 24 04       mov    0x4(%esp),%ecx
 * - 87 cb             xchg   %ecx,%ebx
 * + 53                push   %ebx
 * + 8b 5c 24 08       mov    0x8(%esp),%ebx
 *   b8 0d 00 00 00    mov    $0xd,%eax
 *   cd 80             int    $0x80
 * - 87 cb             xchg   %ecx,%ebx
 * + 5b                pop    %ebx
 *   c3                ret
 *
 * 2 bytes smaller, but uses the stack via "push/pop ebx".
 */
#define LOADARGS_0
#define LOADARGS_1
#define LOADARGS_2
#define LOADARGS_3
#define LOADARGS_4
#define LOADARGS_5
#define LOADARGS_6 "push %%ebp\n\t" "movl %7, %%ebp\n\t"
#define RESTOREARGS_0
#define RESTOREARGS_1
#define RESTOREARGS_2
#define RESTOREARGS_3
#define RESTOREARGS_4
#define RESTOREARGS_5
#define RESTOREARGS_6 "pop %%ebp\n\t"
#define ASMFMT_0()
#define ASMFMT_1(arg1) \
	, "b" (arg1)
#define ASMFMT_2(arg1, arg2) \
	, "b" (arg1), "c" (arg2)
#define ASMFMT_3(arg1, arg2, arg3) \
	, "b" (arg1), "c" (arg2), "d" (arg3)
#define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
#define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#define ASMFMT_6(arg1, arg2, arg3, arg4, arg5, arg6) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5), "m" (arg6)
#endif /* !PIC */
#endif /* __ASSEMBLER__ */
#endif /* _BITS_SYSCALLS_H */