/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * x86 specific definitions for NOLIBC (both 32- and 64-bit)
 * Copyright (C) 2017-2025 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_X86_H
#define _NOLIBC_ARCH_X86_H

#include "compiler.h"
#include "crt.h"

#if !defined(__x86_64__)

/* Syscalls for i386:
 * - mostly similar to x86_64
 * - registers are 32-bit
 * - syscall number is passed in eax
 * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
 * - all registers are preserved (except eax of course)
 * - the system call is performed by executing int $0x80
 * - syscall return comes in eax
 * - the arguments are cast to long and assigned into the target registers
 *   which are then simply passed as registers to the asm code, so that we
 *   don't have to experience issues with register constraints.
 * - the syscall number is always specified last so that the other registers
 *   can be forced before it (gcc refuses a %-register in the last position).
 *
 * Also, i386 supports the old_select syscall if newselect is not available.
 */
#define __ARCH_WANT_SYS_OLD_SELECT
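/*
 * Usage sketch (illustrative only, not part of this header): a wrapper
 * simply casts and forwards its arguments; on failure the kernel returns
 * -errno directly in %eax. __NR_write is assumed to come from the kernel's
 * unistd headers, as elsewhere in nolibc:
 *
 *	static ssize_t sys_write(int fd, const void *buf, size_t count)
 *	{
 *		return my_syscall3(__NR_write, fd, buf, count);
 *	}
 */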

#define my_syscall0(num) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall1(num, arg1) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	register long _arg3 __asm__ ("edx") = (long)(arg3); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	register long _arg3 __asm__ ("edx") = (long)(arg3); \
	register long _arg4 __asm__ ("esi") = (long)(arg4); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	long _ret; \
	register long _num __asm__ ("eax") = (num); \
	register long _arg1 __asm__ ("ebx") = (long)(arg1); \
	register long _arg2 __asm__ ("ecx") = (long)(arg2); \
	register long _arg3 __asm__ ("edx") = (long)(arg3); \
	register long _arg4 __asm__ ("esi") = (long)(arg4); \
	register long _arg5 __asm__ ("edi") = (long)(arg5); \
	\
	__asm__ volatile ( \
		"int $0x80\n" \
		: "=a" (_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "0"(_num) \
		: "memory", "cc" \
	); \
	_ret; \
})

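/*
 * The 6th argument goes in %ebp, but %ebp is also gcc's frame pointer and
 * cannot be handed over as a register operand. The macro below therefore
 * passes arg6 through memory: it pushes the value, saves %ebp, loads %ebp
 * from the stack slot for the duration of int $0x80, then restores both.
 */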
#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	long _eax = (long)(num); \
	long _arg6 = (long)(arg6); /* Always in memory */ \
	__asm__ volatile ( \
		"pushl %[_arg6]\n\t" \
		"pushl %%ebp\n\t" \
		"movl 4(%%esp),%%ebp\n\t" \
		"int $0x80\n\t" \
		"popl %%ebp\n\t" \
		"addl $4,%%esp\n\t" \
		: "+a"(_eax)		/* %eax */ \
		: "b"(arg1),		/* %ebx */ \
		  "c"(arg2),		/* %ecx */ \
		  "d"(arg3),		/* %edx */ \
		  "S"(arg4),		/* %esi */ \
		  "D"(arg5),		/* %edi */ \
		  [_arg6]"m"(_arg6)	/* memory */ \
		: "memory", "cc" \
	); \
	_eax; \
})

/* startup code */
/*
 * The i386 System V ABI mandates:
 * 1) the last pushed argument must be 16-byte aligned.
 * 2) the deepest stack frame should be set to zero.
 */
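/*
 * Note: the adjustment below relies on %esp being 16-byte aligned at
 * process entry: "sub $12" plus the 4-byte "push %eax" moves %esp by
 * exactly 16 bytes, so the alignment still holds when _start_c is called.
 */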
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
	__asm__ volatile (
		"xor  %ebp, %ebp\n"	/* zero the stack frame                                */
		"mov  %esp, %eax\n"	/* save stack pointer to %eax, as arg1 of _start_c     */
		"sub  $12, %esp\n"	/* sub 12 to keep it aligned after the push %eax       */
		"push %eax\n"		/* push arg1 on stack to support plain stack modes too */
		"call _start_c\n"	/* transfer to the C runtime                            */
		"hlt\n"			/* ensure it does not return                            */
	);
	__nolibc_entrypoint_epilogue();
}

#else /* !defined(__x86_64__) */

/* Syscalls for x86_64:
 * - registers are 64-bit
 * - syscall number is passed in rax
 * - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
 * - the system call is performed by executing the syscall instruction
 * - syscall return comes in rax
 * - rcx and r11 are clobbered, others are preserved.
 * - the arguments are cast to long and assigned into the target registers
 *   which are then simply passed as registers to the asm code, so that we
 *   don't have to experience issues with register constraints.
 * - the syscall number is always specified last so that the other registers
 *   can be forced before it (gcc refuses a %-register in the last position).
 * - see also the x86-64 ABI, section A.2 "AMD64 Linux Kernel Conventions",
 *   A.2.1 "Calling Conventions".
 *
 * x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/home
 */
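/*
 * Usage sketch (illustrative only, not part of this header): as on i386,
 * a wrapper simply forwards its arguments; on failure the kernel returns
 * -errno directly in %rax. __NR_getpid is assumed to come from the kernel's
 * unistd headers:
 *
 *	static pid_t sys_getpid(void)
 *	{
 *		return my_syscall0(__NR_getpid);
 *	}
 */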

#define my_syscall0(num) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall1(num, arg1) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	register long _arg4 __asm__ ("r10") = (long)(arg4); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	register long _arg4 __asm__ ("r10") = (long)(arg4); \
	register long _arg5 __asm__ ("r8") = (long)(arg5); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	long _ret; \
	register long _num __asm__ ("rax") = (num); \
	register long _arg1 __asm__ ("rdi") = (long)(arg1); \
	register long _arg2 __asm__ ("rsi") = (long)(arg2); \
	register long _arg3 __asm__ ("rdx") = (long)(arg3); \
	register long _arg4 __asm__ ("r10") = (long)(arg4); \
	register long _arg5 __asm__ ("r8") = (long)(arg5); \
	register long _arg6 __asm__ ("r9") = (long)(arg6); \
	\
	__asm__ volatile ( \
		"syscall\n" \
		: "=a"(_ret) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6), "0"(_num) \
		: "rcx", "r11", "memory", "cc" \
	); \
	_ret; \
})

/* startup code */
/*
 * The x86-64 System V ABI mandates:
 * 1) %rsp must be 16-byte aligned right before the function call.
 * 2) the deepest stack frame should be zero (the %rbp).
 */
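/*
 * Note: the ABI specifies that %rsp is 16-byte aligned at process entry,
 * so _start_c can be called directly without adjusting the stack first.
 */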
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
	__asm__ volatile (
		"xor  %ebp, %ebp\n"	/* zero the stack frame                            */
		"mov  %rsp, %rdi\n"	/* save stack pointer to %rdi, as arg1 of _start_c */
		"call _start_c\n"	/* transfer to the C runtime                       */
		"hlt\n"			/* ensure it does not return                       */
	);
	__nolibc_entrypoint_epilogue();
}

#define NOLIBC_ARCH_HAS_MEMMOVE
void *memmove(void *dst, const void *src, size_t len);

#define NOLIBC_ARCH_HAS_MEMCPY
void *memcpy(void *dst, const void *src, size_t len);

#define NOLIBC_ARCH_HAS_MEMSET
void *memset(void *dst, int c, size_t len);

__asm__ (
".section .text.nolibc_memmove_memcpy\n"
".weak memmove\n"
".weak memcpy\n"
"memmove:\n"
"memcpy:\n"
	"movq %rdx, %rcx\n\t"		/* %rcx = len, the count for rep movsb       */
	"movq %rdi, %rax\n\t"		/* return value = dst                        */
	"movq %rdi, %rdx\n\t"
	"subq %rsi, %rdx\n\t"		/* %rdx = dst - src (unsigned)               */
	"cmpq %rcx, %rdx\n\t"		/* dst - src < len: dst overlaps ahead of    */
	"jb 1f\n\t"			/*   src, a forward copy would corrupt src   */
	"rep movsb\n\t"			/* forward copy                              */
	"retq\n"
"1:" /* backward copy */
	"leaq -1(%rdi, %rcx, 1), %rdi\n\t"	/* point to the last byte of dst     */
	"leaq -1(%rsi, %rcx, 1), %rsi\n\t"	/* point to the last byte of src     */
	"std\n\t"			/* set DF so movsb decrements %rdi/%rsi      */
	"rep movsb\n\t"
	"cld\n\t"			/* clear DF again, as the ABI expects        */
	"retq\n"

".section .text.nolibc_memset\n"
".weak memset\n"
"memset:\n"
	"xchgl %eax, %esi\n\t"		/* %al = fill byte c (2nd argument)          */
	"movq %rdx, %rcx\n\t"		/* %rcx = len, the count for rep stosb       */
	"pushq %rdi\n\t"		/* save dst for the return value             */
	"rep stosb\n\t"
	"popq %rax\n\t"			/* return dst                                */
	"retq\n"
);
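
/*
 * C sketch of the memmove/memcpy logic above (illustrative only, not
 * compiled here): a single unsigned comparison of dst - src against len
 * both detects the dangerous overlap and keeps the common case on the
 * forward path:
 *
 *	void *memmove_sketch(void *dst, const void *src, size_t len)
 *	{
 *		unsigned char *d = dst;
 *		const unsigned char *s = src;
 *
 *		if ((size_t)(d - s) >= len) {
 *			while (len--)
 *				*d++ = *s++;	// forward copy is safe
 *		} else {
 *			d += len; s += len;	// dst overlaps ahead of src:
 *			while (len--)		// copy from the end backward
 *				*--d = *--s;
 *		}
 *		return dst;
 *	}
 */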

#endif /* !defined(__x86_64__) */
#endif /* _NOLIBC_ARCH_X86_H */