Commit 271661c1 authored by Willy Tarreau, committed by Paul E. McKenney

tools/nolibc/arch: split arch-specific code into individual files

In order to ease maintenance, this splits the arch-specific code into
one file per architecture. A common file "arch.h" includes the right
arch-* file based on the detected architecture. Projects that are
already split per architecture can simply rename these files to
$arch/arch.h and get rid of the common arch.h. For this reason, include
guards were placed in each arch-specific file.
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent cc7a492a
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
* ARM specific definitions for NOLIBC
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
#ifndef _NOLIBC_ARCH_ARM_H
#define _NOLIBC_ARCH_ARM_H
/* O_* macros for fcntl/open are architecture-specific */
#define O_RDONLY 0
#define O_WRONLY 1
#define O_RDWR 2
#define O_CREAT 0x40
#define O_EXCL 0x80
#define O_NOCTTY 0x100
#define O_TRUNC 0x200
#define O_APPEND 0x400
#define O_NONBLOCK 0x800
#define O_DIRECTORY 0x4000
/* The struct returned by the stat() syscall, 32-bit only; the syscall returns
* exactly 56 bytes (stops before the unused array). In big endian, the layout
* differs: device numbers are returned as shorts only.
*/
struct sys_stat_struct {
#if defined(__ARMEB__)
unsigned short st_dev;
unsigned short __pad1;
#else
unsigned long st_dev;
#endif
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
#if defined(__ARMEB__)
unsigned short st_rdev;
unsigned short __pad2;
#else
unsigned long st_rdev;
#endif
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused[2];
};
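/* A minimal compile-time sanity check of the 56-byte layout described above
* (illustrative sketch only, assuming a C11 compiler providing _Static_assert
* and the GCC/Clang __builtin_offsetof extension): the kernel fills the
* structure up to, but not including, the __unused[] array.
*/
_Static_assert(__builtin_offsetof(struct sys_stat_struct, __unused) == 56,
               "the ARM stat syscall payload stops at 56 bytes");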
/* Syscalls for ARM in ARM or Thumb modes:
* - registers are 32-bit
* - stack is 8-byte aligned
* ( http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html)
* - syscall number is passed in r7
* - arguments are in r0, r1, r2, r3, r4, r5
* - the system call is performed by calling svc #0
* - syscall return comes in r0.
* - only lr is clobbered.
* - the arguments are cast to long and assigned into the target registers
* which are then simply passed as registers to the asm code, so that we
* don't have to experience issues with register constraints.
* - the syscall number is always specified last in order to allow forcing
* some registers before it (gcc refuses a %-register at the last position).
*
* Also, ARM supports the old_select syscall if newselect is not available
*/
#define __ARCH_WANT_SYS_OLD_SELECT
#define my_syscall0(num) \
({ \
register long _num asm("r7") = (num); \
register long _arg1 asm("r0"); \
\
asm volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_num) \
: "memory", "cc", "lr" \
); \
_arg1; \
})
#define my_syscall1(num, arg1) \
({ \
register long _num asm("r7") = (num); \
register long _arg1 asm("r0") = (long)(arg1); \
\
asm volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), \
"r"(_num) \
: "memory", "cc", "lr" \
); \
_arg1; \
})
#define my_syscall2(num, arg1, arg2) \
({ \
register long _num asm("r7") = (num); \
register long _arg1 asm("r0") = (long)(arg1); \
register long _arg2 asm("r1") = (long)(arg2); \
\
asm volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), "r"(_arg2), \
"r"(_num) \
: "memory", "cc", "lr" \
); \
_arg1; \
})
#define my_syscall3(num, arg1, arg2, arg3) \
({ \
register long _num asm("r7") = (num); \
register long _arg1 asm("r0") = (long)(arg1); \
register long _arg2 asm("r1") = (long)(arg2); \
register long _arg3 asm("r2") = (long)(arg3); \
\
asm volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
"r"(_num) \
: "memory", "cc", "lr" \
); \
_arg1; \
})
#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
register long _num asm("r7") = (num); \
register long _arg1 asm("r0") = (long)(arg1); \
register long _arg2 asm("r1") = (long)(arg2); \
register long _arg3 asm("r2") = (long)(arg3); \
register long _arg4 asm("r3") = (long)(arg4); \
\
asm volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
"r"(_num) \
: "memory", "cc", "lr" \
); \
_arg1; \
})
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
register long _num asm("r7") = (num); \
register long _arg1 asm("r0") = (long)(arg1); \
register long _arg2 asm("r1") = (long)(arg2); \
register long _arg3 asm("r2") = (long)(arg3); \
register long _arg4 asm("r3") = (long)(arg4); \
register long _arg5 asm("r4") = (long)(arg5); \
\
asm volatile ( \
"svc #0\n" \
: "=r" (_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
"r"(_num) \
: "memory", "cc", "lr" \
); \
_arg1; \
})
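/* Usage sketch: the common nolibc code builds its sys_*() wrappers on top of
* these macros. For instance, a minimal exit helper could look like the one
* below, using NR_exit == 1 as in the startup code that follows (the name
* example_exit is purely illustrative).
*/
static __attribute__((unused)) void example_exit(int status)
{
	my_syscall1(1, status);	/* __NR_exit == 1 on ARM EABI */
	while (1);		/* the kernel never returns from exit() */
}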
/* startup code */
asm(".section .text\n"
".global _start\n"
"_start:\n"
#if defined(__THUMBEB__) || defined(__THUMBEL__)
/* We enter here in 32-bit mode but if some previous functions were in
* 16-bit mode, the assembler cannot know, so we need to tell it we're in
* 32-bit now, then switch to 16-bit (is there a better way to do it than
* adding 1 by hand?) and tell the asm we're now in 16-bit mode so that
* it generates correct instructions. Note that we do not support thumb1.
*/
".code 32\n"
"add r0, pc, #1\n"
"bx r0\n"
".code 16\n"
#endif
"pop {%r0}\n" // argc was in the stack
"mov %r1, %sp\n" // argv = sp
"add %r2, %r1, %r0, lsl #2\n" // envp = argv + 4*argc ...
"add %r2, %r2, $4\n" // ... + 4
"and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the
"mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc)
"bl main\n" // main() returns the status code, we'll exit with it.
"movs r7, $1\n" // NR_exit == 1
"svc $0x00\n"
"");
#endif // _NOLIBC_ARCH_ARM_H
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
* i386 specific definitions for NOLIBC
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
#ifndef _NOLIBC_ARCH_I386_H
#define _NOLIBC_ARCH_I386_H
/* O_* macros for fcntl/open are architecture-specific */
#define O_RDONLY 0
#define O_WRONLY 1
#define O_RDWR 2
#define O_CREAT 0x40
#define O_EXCL 0x80
#define O_NOCTTY 0x100
#define O_TRUNC 0x200
#define O_APPEND 0x400
#define O_NONBLOCK 0x800
#define O_DIRECTORY 0x10000
/* The struct returned by the stat() syscall, 32-bit only; the syscall returns
* exactly 56 bytes (stops before the unused array).
*/
struct sys_stat_struct {
unsigned long st_dev;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned long st_rdev;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused[2];
};
/* Syscalls for i386:
* - mostly similar to x86_64
* - registers are 32-bit
* - syscall number is passed in eax
* - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
* - all registers are preserved (except eax of course)
* - the system call is performed by calling int $0x80
* - syscall return comes in eax
* - the arguments are cast to long and assigned into the target registers
* which are then simply passed as registers to the asm code, so that we
* don't have to experience issues with register constraints.
* - the syscall number is always specified last in order to allow forcing
* some registers before it (gcc refuses a %-register at the last position).
*
* Also, i386 supports the old_select syscall if newselect is not available
*/
#define __ARCH_WANT_SYS_OLD_SELECT
#define my_syscall0(num) \
({ \
long _ret; \
register long _num asm("eax") = (num); \
\
asm volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "0"(_num) \
: "memory", "cc" \
); \
_ret; \
})
#define my_syscall1(num, arg1) \
({ \
long _ret; \
register long _num asm("eax") = (num); \
register long _arg1 asm("ebx") = (long)(arg1); \
\
asm volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), \
"0"(_num) \
: "memory", "cc" \
); \
_ret; \
})
#define my_syscall2(num, arg1, arg2) \
({ \
long _ret; \
register long _num asm("eax") = (num); \
register long _arg1 asm("ebx") = (long)(arg1); \
register long _arg2 asm("ecx") = (long)(arg2); \
\
asm volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), \
"0"(_num) \
: "memory", "cc" \
); \
_ret; \
})
#define my_syscall3(num, arg1, arg2, arg3) \
({ \
long _ret; \
register long _num asm("eax") = (num); \
register long _arg1 asm("ebx") = (long)(arg1); \
register long _arg2 asm("ecx") = (long)(arg2); \
register long _arg3 asm("edx") = (long)(arg3); \
\
asm volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
"0"(_num) \
: "memory", "cc" \
); \
_ret; \
})
#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
long _ret; \
register long _num asm("eax") = (num); \
register long _arg1 asm("ebx") = (long)(arg1); \
register long _arg2 asm("ecx") = (long)(arg2); \
register long _arg3 asm("edx") = (long)(arg3); \
register long _arg4 asm("esi") = (long)(arg4); \
\
asm volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
"0"(_num) \
: "memory", "cc" \
); \
_ret; \
})
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
long _ret; \
register long _num asm("eax") = (num); \
register long _arg1 asm("ebx") = (long)(arg1); \
register long _arg2 asm("ecx") = (long)(arg2); \
register long _arg3 asm("edx") = (long)(arg3); \
register long _arg4 asm("esi") = (long)(arg4); \
register long _arg5 asm("edi") = (long)(arg5); \
\
asm volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
"0"(_num) \
: "memory", "cc" \
); \
_ret; \
})
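/* Usage sketch: as on the other architectures, the common nolibc code wraps
* these macros into sys_*() helpers. A minimal write(2) helper could look
* like this, assuming __NR_write == 4 on i386 (the name example_write is
* purely illustrative). Return values in the -4095..-1 range denote -errno.
*/
static __attribute__((unused)) long example_write(int fd, const void *buf, unsigned long count)
{
	return my_syscall3(4, fd, buf, count);	/* __NR_write == 4 */
}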
/* startup code */
/*
* i386 System V ABI mandates:
* 1) the last pushed argument must be 16-byte aligned.
* 2) the deepest stack frame should be set to zero.
*/
asm(".section .text\n"
".global _start\n"
"_start:\n"
"pop %eax\n" // argc (first arg, %eax)
"mov %esp, %ebx\n" // argv[] (second arg, %ebx)
"lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx)
"xor %ebp, %ebp\n" // zero the stack frame
"and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned before
"sub $4, %esp\n" // the call instruction (args are aligned)
"push %ecx\n" // push all registers on the stack so that we
"push %ebx\n" // support both regparm and plain stack modes
"push %eax\n"
"call main\n" // main() returns the status code in %eax
"mov %eax, %ebx\n" // retrieve exit code (32-bit int)
"movl $1, %eax\n" // NR_exit == 1
"int $0x80\n" // exit now
"hlt\n" // ensure it does not
"");
#endif // _NOLIBC_ARCH_I386_H
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
* RISCV (32 and 64) specific definitions for NOLIBC
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
#ifndef _NOLIBC_ARCH_RISCV_H
#define _NOLIBC_ARCH_RISCV_H
/* O_* macros for fcntl/open are architecture-specific */
#define O_RDONLY 0
#define O_WRONLY 1
#define O_RDWR 2
#define O_CREAT 0x40
#define O_EXCL 0x80
#define O_NOCTTY 0x100
#define O_TRUNC 0x200
#define O_APPEND 0x400
#define O_NONBLOCK 0x800
#define O_DIRECTORY 0x10000
struct sys_stat_struct {
unsigned long st_dev; /* Device. */
unsigned long st_ino; /* File serial number. */
unsigned int st_mode; /* File mode. */
unsigned int st_nlink; /* Link count. */
unsigned int st_uid; /* User ID of the file's owner. */
unsigned int st_gid; /* Group ID of the file's group. */
unsigned long st_rdev; /* Device number, if device. */
unsigned long __pad1;
long st_size; /* Size of file, in bytes. */
int st_blksize; /* Optimal block size for I/O. */
int __pad2;
long st_blocks; /* Number of 512-byte blocks allocated. */
long st_atime; /* Time of last access. */
unsigned long st_atime_nsec;
long st_mtime; /* Time of last modification. */
unsigned long st_mtime_nsec;
long st_ctime; /* Time of last status change. */
unsigned long st_ctime_nsec;
unsigned int __unused4;
unsigned int __unused5;
};
#if __riscv_xlen == 64
#define PTRLOG "3"
#define SZREG "8"
#elif __riscv_xlen == 32
#define PTRLOG "2"
#define SZREG "4"
#endif
/* Syscalls for RISCV:
* - stack is 16-byte aligned
* - syscall number is passed in a7
* - arguments are in a0, a1, a2, a3, a4, a5
* - the system call is performed by calling ecall
* - syscall return comes in a0
* - the arguments are cast to long and assigned into the target
* registers which are then simply passed as registers to the asm code,
* so that we don't have to experience issues with register constraints.
*
* On riscv, select() is not implemented so we have to use pselect6().
*/
#define __ARCH_WANT_SYS_PSELECT6
#define my_syscall0(num) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0"); \
\
asm volatile ( \
"ecall\n\t" \
: "=r"(_arg1) \
: "r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
#define my_syscall1(num, arg1) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0") = (long)(arg1); \
\
asm volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
#define my_syscall2(num, arg1, arg2) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0") = (long)(arg1); \
register long _arg2 asm("a1") = (long)(arg2); \
\
asm volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), \
"r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
#define my_syscall3(num, arg1, arg2, arg3) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0") = (long)(arg1); \
register long _arg2 asm("a1") = (long)(arg2); \
register long _arg3 asm("a2") = (long)(arg3); \
\
asm volatile ( \
"ecall\n\t" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), \
"r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0") = (long)(arg1); \
register long _arg2 asm("a1") = (long)(arg2); \
register long _arg3 asm("a2") = (long)(arg3); \
register long _arg4 asm("a3") = (long)(arg4); \
\
asm volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), \
"r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0") = (long)(arg1); \
register long _arg2 asm("a1") = (long)(arg2); \
register long _arg3 asm("a2") = (long)(arg3); \
register long _arg4 asm("a3") = (long)(arg4); \
register long _arg5 asm("a4") = (long)(arg5); \
\
asm volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
"r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
register long _num asm("a7") = (num); \
register long _arg1 asm("a0") = (long)(arg1); \
register long _arg2 asm("a1") = (long)(arg2); \
register long _arg3 asm("a2") = (long)(arg3); \
register long _arg4 asm("a3") = (long)(arg4); \
register long _arg5 asm("a4") = (long)(arg5); \
register long _arg6 asm("a5") = (long)(arg6); \
\
asm volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
"r"(_num) \
: "memory", "cc" \
); \
_arg1; \
})
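/* Usage sketch: a syscall taking no argument simply goes through my_syscall0().
* For instance, a getpid() helper could look like this, assuming the
* asm-generic __NR_getpid == 172 used by riscv (the name example_getpid is
* purely illustrative).
*/
static __attribute__((unused)) long example_getpid(void)
{
	return my_syscall0(172);	/* __NR_getpid == 172 (asm-generic) */
}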
/* startup code */
asm(".section .text\n"
".global _start\n"
"_start:\n"
".option push\n"
".option norelax\n"
"lla gp, __global_pointer$\n"
".option pop\n"
"ld a0, 0(sp)\n" // argc (a0) was in the stack
"add a1, sp, "SZREG"\n" // argv (a1) = sp
"slli a2, a0, "PTRLOG"\n" // envp (a2) = SZREG*argc ...
"add a2, a2, "SZREG"\n" // + SZREG (skip null)
"add a2,a2,a1\n" // + argv
"andi sp,a1,-16\n" // sp must be 16-byte aligned
"call main\n" // main() returns the status code, we'll exit with it.
"li a7, 93\n" // NR_exit == 93
"ecall\n"
"");
#endif // _NOLIBC_ARCH_RISCV_H
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
*/
/* Below comes the architecture-specific code. For each architecture, we have
* the syscall declarations and the _start code definition. This is the only
* global part. On all architectures the kernel puts everything on the stack
* before jumping to _start just above us, without any return address (_start
* is not a function but an entry point). So at the stack pointer we find argc,
* then argv[] begins and ends at the first NULL. Right after that NULL comes
* envp, which itself ends with a NULL as well. So envp = argv + argc + 1.
*/
#ifndef _NOLIBC_ARCH_H
#define _NOLIBC_ARCH_H
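/* Illustration of the layout described above: once main() has been entered,
* the environment can be recovered from argc/argv alone. A minimal sketch
* (the name find_envp is purely illustrative):
*/
static __attribute__((unused)) char **find_envp(int argc, char **argv)
{
	/* envp starts right after the NULL that terminates argv[] */
	return argv + argc + 1;
}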
#if defined(__x86_64__)
#include "arch-x86_64.h"
#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
#include "arch-i386.h"
#elif defined(__ARM_EABI__)
#include "arch-arm.h"
#elif defined(__aarch64__)
#include "arch-aarch64.h"
#elif defined(__mips__) && defined(_ABIO32)
#include "arch-mips.h"
#elif defined(__riscv)
#include "arch-riscv.h"
#endif
#endif /* _NOLIBC_ARCH_H */