/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/resource.h>

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"

/* NOTE(review): presumably the ppc64 host headers define these ELF
 * macros already; undefine them so the per-target definitions below
 * take effect -- confirm against the host toolchain headers. */
#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

/* OS ABI byte written into the ELF ident of generated core dumps. */
#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO =    0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC = 0x0400000,
    ADDR_LIMIT_32BIT =  0x0800000,
    SHORT_INODE =       0x1000000,
    WHOLE_SECONDS =     0x2000000,
    STICKY_TIMEOUTS =   0x4000000,
    ADDR_LIMIT_3GB =    0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)

/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

/* ELF data encoding (byte order) for the target. */
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

/* Core-dump register element type and the swap that converts a value
 * into target byte order before it is written to the dump. */
#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

/* Return the AT_PLATFORM string for the guest CPU: "i386" with the
 * second character replaced by the CPU family digit, clamped to the
 * "i386".."i686" range. */
static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

/* AT_HWCAP for x86 is simply the CPUID.1 EDX feature word. */
static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

/* Initial user register state: zeroed %rax, stack pointer and entry
 * point from the loaded image. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be place for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
1589edc5d79SMika Westerberg * 1599edc5d79SMika Westerberg * See linux kernel: arch/x86/include/asm/elf.h 1609edc5d79SMika Westerberg */ 16105390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) 1629edc5d79SMika Westerberg { 1639edc5d79SMika Westerberg (*regs)[0] = env->regs[15]; 1649edc5d79SMika Westerberg (*regs)[1] = env->regs[14]; 1659edc5d79SMika Westerberg (*regs)[2] = env->regs[13]; 1669edc5d79SMika Westerberg (*regs)[3] = env->regs[12]; 1679edc5d79SMika Westerberg (*regs)[4] = env->regs[R_EBP]; 1689edc5d79SMika Westerberg (*regs)[5] = env->regs[R_EBX]; 1699edc5d79SMika Westerberg (*regs)[6] = env->regs[11]; 1709edc5d79SMika Westerberg (*regs)[7] = env->regs[10]; 1719edc5d79SMika Westerberg (*regs)[8] = env->regs[9]; 1729edc5d79SMika Westerberg (*regs)[9] = env->regs[8]; 1739edc5d79SMika Westerberg (*regs)[10] = env->regs[R_EAX]; 1749edc5d79SMika Westerberg (*regs)[11] = env->regs[R_ECX]; 1759edc5d79SMika Westerberg (*regs)[12] = env->regs[R_EDX]; 1769edc5d79SMika Westerberg (*regs)[13] = env->regs[R_ESI]; 1779edc5d79SMika Westerberg (*regs)[14] = env->regs[R_EDI]; 1789edc5d79SMika Westerberg (*regs)[15] = env->regs[R_EAX]; /* XXX */ 1799edc5d79SMika Westerberg (*regs)[16] = env->eip; 1809edc5d79SMika Westerberg (*regs)[17] = env->segs[R_CS].selector & 0xffff; 1819edc5d79SMika Westerberg (*regs)[18] = env->eflags; 1829edc5d79SMika Westerberg (*regs)[19] = env->regs[R_ESP]; 1839edc5d79SMika Westerberg (*regs)[20] = env->segs[R_SS].selector & 0xffff; 1849edc5d79SMika Westerberg (*regs)[21] = env->segs[R_FS].selector & 0xffff; 1859edc5d79SMika Westerberg (*regs)[22] = env->segs[R_GS].selector & 0xffff; 1869edc5d79SMika Westerberg (*regs)[23] = env->segs[R_DS].selector & 0xffff; 1879edc5d79SMika Westerberg (*regs)[24] = env->segs[R_ES].selector & 0xffff; 1889edc5d79SMika Westerberg (*regs)[25] = env->segs[R_FS].selector & 0xffff; 1899edc5d79SMika Westerberg (*regs)[26] = env->segs[R_GS].selector & 0xffff; 
1909edc5d79SMika Westerberg } 1919edc5d79SMika Westerberg 19284409ddbSj_mayer #else 19384409ddbSj_mayer 19430ac07d4Sbellard #define ELF_START_MMAP 0x80000000 19530ac07d4Sbellard 19630ac07d4Sbellard /* 19730ac07d4Sbellard * This is used to ensure we don't load something for the wrong architecture. 19830ac07d4Sbellard */ 19930ac07d4Sbellard #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) ) 20030ac07d4Sbellard 20130ac07d4Sbellard /* 20230ac07d4Sbellard * These are used to set parameters in the core dumps. 20330ac07d4Sbellard */ 20430ac07d4Sbellard #define ELF_CLASS ELFCLASS32 20530ac07d4Sbellard #define ELF_ARCH EM_386 20630ac07d4Sbellard 207d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 208d97ef72eSRichard Henderson struct image_info *infop) 209e5fe0c52Spbrook { 210e5fe0c52Spbrook regs->esp = infop->start_stack; 211e5fe0c52Spbrook regs->eip = infop->entry; 212e5fe0c52Spbrook 21330ac07d4Sbellard /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program 21430ac07d4Sbellard starts %edx contains a pointer to a function which might be 21530ac07d4Sbellard registered using `atexit'. This provides a mean for the 21630ac07d4Sbellard dynamic linker to call DT_FINI functions for shared libraries 21730ac07d4Sbellard that have been loaded before the code runs. 21830ac07d4Sbellard 21930ac07d4Sbellard A value of 0 tells we have no such handler. */ 220e5fe0c52Spbrook regs->edx = 0; 221b346ff46Sbellard } 2229edc5d79SMika Westerberg 2239edc5d79SMika Westerberg #define ELF_NREG 17 224c227f099SAnthony Liguori typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 2259edc5d79SMika Westerberg 2269edc5d79SMika Westerberg /* 2279edc5d79SMika Westerberg * Note that ELF_NREG should be 19 as there should be place for 2289edc5d79SMika Westerberg * TRAPNO and ERR "registers" as well but linux doesn't dump 2299edc5d79SMika Westerberg * those. 
2309edc5d79SMika Westerberg * 2319edc5d79SMika Westerberg * See linux kernel: arch/x86/include/asm/elf.h 2329edc5d79SMika Westerberg */ 23305390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) 2349edc5d79SMika Westerberg { 2359edc5d79SMika Westerberg (*regs)[0] = env->regs[R_EBX]; 2369edc5d79SMika Westerberg (*regs)[1] = env->regs[R_ECX]; 2379edc5d79SMika Westerberg (*regs)[2] = env->regs[R_EDX]; 2389edc5d79SMika Westerberg (*regs)[3] = env->regs[R_ESI]; 2399edc5d79SMika Westerberg (*regs)[4] = env->regs[R_EDI]; 2409edc5d79SMika Westerberg (*regs)[5] = env->regs[R_EBP]; 2419edc5d79SMika Westerberg (*regs)[6] = env->regs[R_EAX]; 2429edc5d79SMika Westerberg (*regs)[7] = env->segs[R_DS].selector & 0xffff; 2439edc5d79SMika Westerberg (*regs)[8] = env->segs[R_ES].selector & 0xffff; 2449edc5d79SMika Westerberg (*regs)[9] = env->segs[R_FS].selector & 0xffff; 2459edc5d79SMika Westerberg (*regs)[10] = env->segs[R_GS].selector & 0xffff; 2469edc5d79SMika Westerberg (*regs)[11] = env->regs[R_EAX]; /* XXX */ 2479edc5d79SMika Westerberg (*regs)[12] = env->eip; 2489edc5d79SMika Westerberg (*regs)[13] = env->segs[R_CS].selector & 0xffff; 2499edc5d79SMika Westerberg (*regs)[14] = env->eflags; 2509edc5d79SMika Westerberg (*regs)[15] = env->regs[R_ESP]; 2519edc5d79SMika Westerberg (*regs)[16] = env->segs[R_SS].selector & 0xffff; 2529edc5d79SMika Westerberg } 25384409ddbSj_mayer #endif 254b346ff46Sbellard 2559edc5d79SMika Westerberg #define USE_ELF_CORE_DUMP 256b346ff46Sbellard #define ELF_EXEC_PAGESIZE 4096 257b346ff46Sbellard 258b346ff46Sbellard #endif 259b346ff46Sbellard 260b346ff46Sbellard #ifdef TARGET_ARM 261b346ff46Sbellard 26224e76ff0SPeter Maydell #ifndef TARGET_AARCH64 26324e76ff0SPeter Maydell /* 32 bit ARM definitions */ 26424e76ff0SPeter Maydell 265b346ff46Sbellard #define ELF_START_MMAP 0x80000000 266b346ff46Sbellard 267b597c3f7SPeter Crosthwaite #define ELF_ARCH EM_ARM 268b346ff46Sbellard #define ELF_CLASS ELFCLASS32 

/* Initial user register state for a 32-bit ARM image: user mode CPSR
 * (Thumb bit from the entry point's low bit), PC, SP, and the argv/envp
 * pointers loaded from the prepared guest stack. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    /* An odd entry point means a Thumb entry; set the T bit and clear
     * the low bit from the PC. */
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    /* NOTE(review): loads *(sp+4); r1 conventionally carries argv, but
     * the original comment also said "envp" -- confirm. */
    get_user_ual(regs->uregs[1], stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/* Copy r0-r15, CPSR and ORIG_r0 into the core-dump register set,
 * converting each value to target byte order with tswapreg(). */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

/* AT_HWCAP feature bits advertised to 32-bit ARM guests. */
enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
};

/* AT_HWCAP2 feature bits (v8 crypto and CRC extensions). */
enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
};

/* The commpage only exists for 32 bit kernels */

/* Return 1 if the proposed guest space is suitable for the guest.
 * Return 0 if the proposed guest space isn't suitable, but another
 * address space should be tried.
 * Return -1 if there is no way the proposed guest space can be
 * valid regardless of the base.
362806d1021SMeador Inge * The guest code may leave a page mapped and populate it if the 363806d1021SMeador Inge * address is suitable. 364806d1021SMeador Inge */ 365c3637eafSLuke Shumaker static int init_guest_commpage(unsigned long guest_base, 366806d1021SMeador Inge unsigned long guest_size) 36797cc7560SDr. David Alan Gilbert { 36897cc7560SDr. David Alan Gilbert unsigned long real_start, test_page_addr; 36997cc7560SDr. David Alan Gilbert 37097cc7560SDr. David Alan Gilbert /* We need to check that we can force a fault on access to the 37197cc7560SDr. David Alan Gilbert * commpage at 0xffff0fxx 37297cc7560SDr. David Alan Gilbert */ 37397cc7560SDr. David Alan Gilbert test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask); 374806d1021SMeador Inge 375806d1021SMeador Inge /* If the commpage lies within the already allocated guest space, 376806d1021SMeador Inge * then there is no way we can allocate it. 377955e304fSLuke Shumaker * 378955e304fSLuke Shumaker * You may be thinking that that this check is redundant because 379955e304fSLuke Shumaker * we already validated the guest size against MAX_RESERVED_VA; 380955e304fSLuke Shumaker * but if qemu_host_page_mask is unusually large, then 381955e304fSLuke Shumaker * test_page_addr may be lower. 382806d1021SMeador Inge */ 383806d1021SMeador Inge if (test_page_addr >= guest_base 384e568f9dfSPeter Maydell && test_page_addr < (guest_base + guest_size)) { 385806d1021SMeador Inge return -1; 386806d1021SMeador Inge } 387806d1021SMeador Inge 38897cc7560SDr. David Alan Gilbert /* Note it needs to be writeable to let us initialise it */ 38997cc7560SDr. David Alan Gilbert real_start = (unsigned long) 39097cc7560SDr. David Alan Gilbert mmap((void *)test_page_addr, qemu_host_page_size, 39197cc7560SDr. David Alan Gilbert PROT_READ | PROT_WRITE, 39297cc7560SDr. David Alan Gilbert MAP_ANONYMOUS | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 39397cc7560SDr. David Alan Gilbert 39497cc7560SDr. 
David Alan Gilbert /* If we can't map it then try another address */ 39597cc7560SDr. David Alan Gilbert if (real_start == -1ul) { 39697cc7560SDr. David Alan Gilbert return 0; 39797cc7560SDr. David Alan Gilbert } 39897cc7560SDr. David Alan Gilbert 39997cc7560SDr. David Alan Gilbert if (real_start != test_page_addr) { 40097cc7560SDr. David Alan Gilbert /* OS didn't put the page where we asked - unmap and reject */ 40197cc7560SDr. David Alan Gilbert munmap((void *)real_start, qemu_host_page_size); 40297cc7560SDr. David Alan Gilbert return 0; 40397cc7560SDr. David Alan Gilbert } 40497cc7560SDr. David Alan Gilbert 40597cc7560SDr. David Alan Gilbert /* Leave the page mapped 40697cc7560SDr. David Alan Gilbert * Populate it (mmap should have left it all 0'd) 40797cc7560SDr. David Alan Gilbert */ 40897cc7560SDr. David Alan Gilbert 40997cc7560SDr. David Alan Gilbert /* Kernel helper versions */ 41097cc7560SDr. David Alan Gilbert __put_user(5, (uint32_t *)g2h(0xffff0ffcul)); 41197cc7560SDr. David Alan Gilbert 41297cc7560SDr. David Alan Gilbert /* Now it's populated make it RO */ 41397cc7560SDr. David Alan Gilbert if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) { 41497cc7560SDr. David Alan Gilbert perror("Protecting guest commpage"); 41597cc7560SDr. David Alan Gilbert exit(-1); 41697cc7560SDr. David Alan Gilbert } 41797cc7560SDr. David Alan Gilbert 41897cc7560SDr. David Alan Gilbert return 1; /* All good */ 41997cc7560SDr. 
David Alan Gilbert } 420adf050b1SBenoit Canet 421adf050b1SBenoit Canet #define ELF_HWCAP get_elf_hwcap() 422ad6919dcSPeter Maydell #define ELF_HWCAP2 get_elf_hwcap2() 423adf050b1SBenoit Canet 424adf050b1SBenoit Canet static uint32_t get_elf_hwcap(void) 425adf050b1SBenoit Canet { 426a2247f8eSAndreas Färber ARMCPU *cpu = ARM_CPU(thread_cpu); 427adf050b1SBenoit Canet uint32_t hwcaps = 0; 428adf050b1SBenoit Canet 429adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_SWP; 430adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_HALF; 431adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_THUMB; 432adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_FAST_MULT; 433adf050b1SBenoit Canet 434adf050b1SBenoit Canet /* probe for the extra features */ 435adf050b1SBenoit Canet #define GET_FEATURE(feat, hwcap) \ 436a2247f8eSAndreas Färber do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) 43724682654SPeter Maydell /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ 43824682654SPeter Maydell GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); 439adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP); 440adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); 441adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); 442adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); 443adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3); 44424682654SPeter Maydell GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); 44524682654SPeter Maydell GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4); 44624682654SPeter Maydell GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA); 44724682654SPeter Maydell GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT); 44824682654SPeter Maydell /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c. 
     * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
     * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
     * to our VFP_FP16 feature bit.
     */
    GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPD32);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);

    return hwcaps;
}

/* Build the 32-bit ARM AT_HWCAP2 value (v8 crypto/CRC features). */
static uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
    return hwcaps;
}

#undef GET_FEATURE

#else
/* 64 bit ARM definitions */
#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#define ELF_PLATFORM    "aarch64"

/* Initial user register state: PC at the word-aligned entry point,
 * SP at the prepared guest stack, everything else zeroed. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/* Copy x0-x30/sp (xregs[0..31]), pc and pstate into the core-dump
 * register set, converting each value to target byte order. */
static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

/* AT_HWCAP feature bits advertised to AArch64 guests. */
enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
};

#define ELF_HWCAP get_elf_hwcap()

/* Build the AArch64 AT_HWCAP value from the guest CPU's features. */
static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
    GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
    GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
    GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE(ARM_FEATURE_V8_FP16,
                ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
#undef GET_FEATURE

    return hwcaps;
}

#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9

/* Bias subtracted from %sp for 64-bit SPARC stack frames. */
#define STACK_BIAS 2047

/* Initial register state for SPARC64; the stack pointer layout depends
 * on whether a 32-bit personality (PER_LINUX32) is in effect. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
#ifndef TARGET_ABI32
    regs->tstate = 0;
#endif
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}

#else
#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC

/* Initial register state for 32-bit SPARC. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.
*/ 648df84e4f3SNathan Froyd enum { 6493efa9a67Smalc QEMU_PPC_FEATURE_32 = 0x80000000, 6503efa9a67Smalc QEMU_PPC_FEATURE_64 = 0x40000000, 6513efa9a67Smalc QEMU_PPC_FEATURE_601_INSTR = 0x20000000, 6523efa9a67Smalc QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000, 6533efa9a67Smalc QEMU_PPC_FEATURE_HAS_FPU = 0x08000000, 6543efa9a67Smalc QEMU_PPC_FEATURE_HAS_MMU = 0x04000000, 6553efa9a67Smalc QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000, 6563efa9a67Smalc QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000, 6573efa9a67Smalc QEMU_PPC_FEATURE_HAS_SPE = 0x00800000, 6583efa9a67Smalc QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000, 6593efa9a67Smalc QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000, 6603efa9a67Smalc QEMU_PPC_FEATURE_NO_TB = 0x00100000, 6613efa9a67Smalc QEMU_PPC_FEATURE_POWER4 = 0x00080000, 6623efa9a67Smalc QEMU_PPC_FEATURE_POWER5 = 0x00040000, 6633efa9a67Smalc QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000, 6643efa9a67Smalc QEMU_PPC_FEATURE_CELL = 0x00010000, 6653efa9a67Smalc QEMU_PPC_FEATURE_BOOKE = 0x00008000, 6663efa9a67Smalc QEMU_PPC_FEATURE_SMT = 0x00004000, 6673efa9a67Smalc QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000, 6683efa9a67Smalc QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000, 6693efa9a67Smalc QEMU_PPC_FEATURE_PA6T = 0x00000800, 6703efa9a67Smalc QEMU_PPC_FEATURE_HAS_DFP = 0x00000400, 6713efa9a67Smalc QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200, 6723efa9a67Smalc QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100, 6733efa9a67Smalc QEMU_PPC_FEATURE_HAS_VSX = 0x00000080, 6743efa9a67Smalc QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040, 675df84e4f3SNathan Froyd 6763efa9a67Smalc QEMU_PPC_FEATURE_TRUE_LE = 0x00000002, 6773efa9a67Smalc QEMU_PPC_FEATURE_PPC_LE = 0x00000001, 678a60438ddSTom Musta 679a60438ddSTom Musta /* Feature definitions in AT_HWCAP2. 
*/ 680a60438ddSTom Musta QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */ 681a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */ 682a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */ 683a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */ 684a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */ 685a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */ 686df84e4f3SNathan Froyd }; 687df84e4f3SNathan Froyd 688df84e4f3SNathan Froyd #define ELF_HWCAP get_elf_hwcap() 689df84e4f3SNathan Froyd 690df84e4f3SNathan Froyd static uint32_t get_elf_hwcap(void) 691df84e4f3SNathan Froyd { 692a2247f8eSAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); 693df84e4f3SNathan Froyd uint32_t features = 0; 694df84e4f3SNathan Froyd 695df84e4f3SNathan Froyd /* We don't have to be terribly complete here; the high points are 696df84e4f3SNathan Froyd Altivec/FP/SPE support. Anything else is just a bonus. 
*/ 697df84e4f3SNathan Froyd #define GET_FEATURE(flag, feature) \ 698a2247f8eSAndreas Färber do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) 69958eb5308SMichael Walle #define GET_FEATURE2(flags, feature) \ 70058eb5308SMichael Walle do { \ 70158eb5308SMichael Walle if ((cpu->env.insns_flags2 & flags) == flags) { \ 70258eb5308SMichael Walle features |= feature; \ 70358eb5308SMichael Walle } \ 70458eb5308SMichael Walle } while (0) 7053efa9a67Smalc GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64); 7063efa9a67Smalc GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU); 7073efa9a67Smalc GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC); 7083efa9a67Smalc GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE); 7093efa9a67Smalc GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE); 7103efa9a67Smalc GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE); 7113efa9a67Smalc GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE); 7123efa9a67Smalc GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC); 7130e019746STom Musta GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP); 7140e019746STom Musta GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX); 7150e019746STom Musta GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | 7160e019746STom Musta PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206), 7170e019746STom Musta QEMU_PPC_FEATURE_ARCH_2_06); 718df84e4f3SNathan Froyd #undef GET_FEATURE 7190e019746STom Musta #undef GET_FEATURE2 720df84e4f3SNathan Froyd 721df84e4f3SNathan Froyd return features; 722df84e4f3SNathan Froyd } 723df84e4f3SNathan Froyd 724a60438ddSTom Musta #define ELF_HWCAP2 get_elf_hwcap2() 725a60438ddSTom Musta 726a60438ddSTom Musta static uint32_t get_elf_hwcap2(void) 727a60438ddSTom Musta { 728a60438ddSTom Musta PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); 729a60438ddSTom Musta uint32_t features = 0; 730a60438ddSTom Musta 731a60438ddSTom Musta #define GET_FEATURE(flag, feature) \ 732a60438ddSTom Musta do { if (cpu->env.insns_flags & flag) { 
features |= feature; } } while (0) 733a60438ddSTom Musta #define GET_FEATURE2(flag, feature) \ 734a60438ddSTom Musta do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0) 735a60438ddSTom Musta 736a60438ddSTom Musta GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL); 737a60438ddSTom Musta GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR); 738a60438ddSTom Musta GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | 739a60438ddSTom Musta PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07); 740a60438ddSTom Musta 741a60438ddSTom Musta #undef GET_FEATURE 742a60438ddSTom Musta #undef GET_FEATURE2 743a60438ddSTom Musta 744a60438ddSTom Musta return features; 745a60438ddSTom Musta } 746a60438ddSTom Musta 747f5155289Sbellard /* 748f5155289Sbellard * The requirements here are: 749f5155289Sbellard * - keep the final alignment of sp (sp & 0xf) 750f5155289Sbellard * - make sure the 32-bit value at the first 16 byte aligned position of 751f5155289Sbellard * AUXV is greater than 16 for glibc compatibility. 752f5155289Sbellard * AT_IGNOREPPC is used for that. 753f5155289Sbellard * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, 754f5155289Sbellard * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. 755f5155289Sbellard */ 7560bccf03dSbellard #define DLINFO_ARCH_ITEMS 5 757f5155289Sbellard #define ARCH_DLINFO \ 758f5155289Sbellard do { \ 759623e250aSTom Musta PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); \ 760f5155289Sbellard /* \ 76182991bedSPeter Maydell * Handle glibc compatibility: these magic entries must \ 76282991bedSPeter Maydell * be at the lowest addresses in the final auxv. 
\ 763f5155289Sbellard */ \ 7640bccf03dSbellard NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 7650bccf03dSbellard NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 76682991bedSPeter Maydell NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \ 76782991bedSPeter Maydell NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \ 76882991bedSPeter Maydell NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ 769f5155289Sbellard } while (0) 770f5155289Sbellard 77167867308Sbellard static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) 77267867308Sbellard { 77367867308Sbellard _regs->gpr[1] = infop->start_stack; 774e85e7c6eSj_mayer #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) 775d90b94cdSDoug Kwan if (get_ppc64_abi(infop) < 2) { 7762ccf97ecSPeter Maydell uint64_t val; 7772ccf97ecSPeter Maydell get_user_u64(val, infop->entry + 8); 7782ccf97ecSPeter Maydell _regs->gpr[2] = val + infop->load_bias; 7792ccf97ecSPeter Maydell get_user_u64(val, infop->entry); 7802ccf97ecSPeter Maydell infop->entry = val + infop->load_bias; 781d90b94cdSDoug Kwan } else { 782d90b94cdSDoug Kwan _regs->gpr[12] = infop->entry; /* r12 set to global entry address */ 783d90b94cdSDoug Kwan } 78484409ddbSj_mayer #endif 78567867308Sbellard _regs->nip = infop->entry; 78667867308Sbellard } 78767867308Sbellard 788e2f3e741SNathan Froyd /* See linux kernel: arch/powerpc/include/asm/elf.h. 
*/ 789e2f3e741SNathan Froyd #define ELF_NREG 48 790e2f3e741SNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 791e2f3e741SNathan Froyd 79205390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env) 793e2f3e741SNathan Froyd { 794e2f3e741SNathan Froyd int i; 795e2f3e741SNathan Froyd target_ulong ccr = 0; 796e2f3e741SNathan Froyd 797e2f3e741SNathan Froyd for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 79886cd7b2dSPaolo Bonzini (*regs)[i] = tswapreg(env->gpr[i]); 799e2f3e741SNathan Froyd } 800e2f3e741SNathan Froyd 80186cd7b2dSPaolo Bonzini (*regs)[32] = tswapreg(env->nip); 80286cd7b2dSPaolo Bonzini (*regs)[33] = tswapreg(env->msr); 80386cd7b2dSPaolo Bonzini (*regs)[35] = tswapreg(env->ctr); 80486cd7b2dSPaolo Bonzini (*regs)[36] = tswapreg(env->lr); 80586cd7b2dSPaolo Bonzini (*regs)[37] = tswapreg(env->xer); 806e2f3e741SNathan Froyd 807e2f3e741SNathan Froyd for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 808e2f3e741SNathan Froyd ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 809e2f3e741SNathan Froyd } 81086cd7b2dSPaolo Bonzini (*regs)[38] = tswapreg(ccr); 811e2f3e741SNathan Froyd } 812e2f3e741SNathan Froyd 813e2f3e741SNathan Froyd #define USE_ELF_CORE_DUMP 81467867308Sbellard #define ELF_EXEC_PAGESIZE 4096 81567867308Sbellard 81667867308Sbellard #endif 81767867308Sbellard 818048f6b4dSbellard #ifdef TARGET_MIPS 819048f6b4dSbellard 820048f6b4dSbellard #define ELF_START_MMAP 0x80000000 821048f6b4dSbellard 822388bb21aSths #ifdef TARGET_MIPS64 823388bb21aSths #define ELF_CLASS ELFCLASS64 824388bb21aSths #else 825048f6b4dSbellard #define ELF_CLASS ELFCLASS32 826388bb21aSths #endif 827048f6b4dSbellard #define ELF_ARCH EM_MIPS 828048f6b4dSbellard 829d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 830d97ef72eSRichard Henderson struct image_info *infop) 831048f6b4dSbellard { 832623a930eSths regs->cp0_status = 2 << CP0St_KSU; 833048f6b4dSbellard regs->cp0_epc = infop->entry; 
834048f6b4dSbellard regs->regs[29] = infop->start_stack; 835048f6b4dSbellard } 836048f6b4dSbellard 83751e52606SNathan Froyd /* See linux kernel: arch/mips/include/asm/elf.h. */ 83851e52606SNathan Froyd #define ELF_NREG 45 83951e52606SNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 84051e52606SNathan Froyd 84151e52606SNathan Froyd /* See linux kernel: arch/mips/include/asm/reg.h. */ 84251e52606SNathan Froyd enum { 84351e52606SNathan Froyd #ifdef TARGET_MIPS64 84451e52606SNathan Froyd TARGET_EF_R0 = 0, 84551e52606SNathan Froyd #else 84651e52606SNathan Froyd TARGET_EF_R0 = 6, 84751e52606SNathan Froyd #endif 84851e52606SNathan Froyd TARGET_EF_R26 = TARGET_EF_R0 + 26, 84951e52606SNathan Froyd TARGET_EF_R27 = TARGET_EF_R0 + 27, 85051e52606SNathan Froyd TARGET_EF_LO = TARGET_EF_R0 + 32, 85151e52606SNathan Froyd TARGET_EF_HI = TARGET_EF_R0 + 33, 85251e52606SNathan Froyd TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34, 85351e52606SNathan Froyd TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35, 85451e52606SNathan Froyd TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36, 85551e52606SNathan Froyd TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37 85651e52606SNathan Froyd }; 85751e52606SNathan Froyd 85851e52606SNathan Froyd /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. 
*/ 85905390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env) 86051e52606SNathan Froyd { 86151e52606SNathan Froyd int i; 86251e52606SNathan Froyd 86351e52606SNathan Froyd for (i = 0; i < TARGET_EF_R0; i++) { 86451e52606SNathan Froyd (*regs)[i] = 0; 86551e52606SNathan Froyd } 86651e52606SNathan Froyd (*regs)[TARGET_EF_R0] = 0; 86751e52606SNathan Froyd 86851e52606SNathan Froyd for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { 869a29f998dSPaolo Bonzini (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]); 87051e52606SNathan Froyd } 87151e52606SNathan Froyd 87251e52606SNathan Froyd (*regs)[TARGET_EF_R26] = 0; 87351e52606SNathan Froyd (*regs)[TARGET_EF_R27] = 0; 874a29f998dSPaolo Bonzini (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]); 875a29f998dSPaolo Bonzini (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]); 876a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC); 877a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr); 878a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status); 879a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause); 88051e52606SNathan Froyd } 88151e52606SNathan Froyd 88251e52606SNathan Froyd #define USE_ELF_CORE_DUMP 883388bb21aSths #define ELF_EXEC_PAGESIZE 4096 884388bb21aSths 88546a1ee4fSJames Cowgill /* See arch/mips/include/uapi/asm/hwcap.h. 
*/ 88646a1ee4fSJames Cowgill enum { 88746a1ee4fSJames Cowgill HWCAP_MIPS_R6 = (1 << 0), 88846a1ee4fSJames Cowgill HWCAP_MIPS_MSA = (1 << 1), 88946a1ee4fSJames Cowgill }; 89046a1ee4fSJames Cowgill 89146a1ee4fSJames Cowgill #define ELF_HWCAP get_elf_hwcap() 89246a1ee4fSJames Cowgill 89346a1ee4fSJames Cowgill static uint32_t get_elf_hwcap(void) 89446a1ee4fSJames Cowgill { 89546a1ee4fSJames Cowgill MIPSCPU *cpu = MIPS_CPU(thread_cpu); 89646a1ee4fSJames Cowgill uint32_t hwcaps = 0; 89746a1ee4fSJames Cowgill 89846a1ee4fSJames Cowgill #define GET_FEATURE(flag, hwcap) \ 89946a1ee4fSJames Cowgill do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0) 90046a1ee4fSJames Cowgill 90146a1ee4fSJames Cowgill GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6); 90246a1ee4fSJames Cowgill GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA); 90346a1ee4fSJames Cowgill 90446a1ee4fSJames Cowgill #undef GET_FEATURE 90546a1ee4fSJames Cowgill 90646a1ee4fSJames Cowgill return hwcaps; 90746a1ee4fSJames Cowgill } 90846a1ee4fSJames Cowgill 909048f6b4dSbellard #endif /* TARGET_MIPS */ 910048f6b4dSbellard 911b779e29eSEdgar E. Iglesias #ifdef TARGET_MICROBLAZE 912b779e29eSEdgar E. Iglesias 913b779e29eSEdgar E. Iglesias #define ELF_START_MMAP 0x80000000 914b779e29eSEdgar E. Iglesias 9150d5d4699SEdgar E. Iglesias #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD) 916b779e29eSEdgar E. Iglesias 917b779e29eSEdgar E. Iglesias #define ELF_CLASS ELFCLASS32 9180d5d4699SEdgar E. Iglesias #define ELF_ARCH EM_MICROBLAZE 919b779e29eSEdgar E. Iglesias 920d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 921d97ef72eSRichard Henderson struct image_info *infop) 922b779e29eSEdgar E. Iglesias { 923b779e29eSEdgar E. Iglesias regs->pc = infop->entry; 924b779e29eSEdgar E. Iglesias regs->r1 = infop->start_stack; 925b779e29eSEdgar E. Iglesias 926b779e29eSEdgar E. Iglesias } 927b779e29eSEdgar E. Iglesias 928b779e29eSEdgar E. 
Iglesias #define ELF_EXEC_PAGESIZE 4096 929b779e29eSEdgar E. Iglesias 930e4cbd44dSEdgar E. Iglesias #define USE_ELF_CORE_DUMP 931e4cbd44dSEdgar E. Iglesias #define ELF_NREG 38 932e4cbd44dSEdgar E. Iglesias typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 933e4cbd44dSEdgar E. Iglesias 934e4cbd44dSEdgar E. Iglesias /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 93505390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env) 936e4cbd44dSEdgar E. Iglesias { 937e4cbd44dSEdgar E. Iglesias int i, pos = 0; 938e4cbd44dSEdgar E. Iglesias 939e4cbd44dSEdgar E. Iglesias for (i = 0; i < 32; i++) { 94086cd7b2dSPaolo Bonzini (*regs)[pos++] = tswapreg(env->regs[i]); 941e4cbd44dSEdgar E. Iglesias } 942e4cbd44dSEdgar E. Iglesias 943e4cbd44dSEdgar E. Iglesias for (i = 0; i < 6; i++) { 94486cd7b2dSPaolo Bonzini (*regs)[pos++] = tswapreg(env->sregs[i]); 945e4cbd44dSEdgar E. Iglesias } 946e4cbd44dSEdgar E. Iglesias } 947e4cbd44dSEdgar E. Iglesias 948b779e29eSEdgar E. Iglesias #endif /* TARGET_MICROBLAZE */ 949b779e29eSEdgar E. 
Iglesias 950a0a839b6SMarek Vasut #ifdef TARGET_NIOS2 951a0a839b6SMarek Vasut 952a0a839b6SMarek Vasut #define ELF_START_MMAP 0x80000000 953a0a839b6SMarek Vasut 954a0a839b6SMarek Vasut #define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2) 955a0a839b6SMarek Vasut 956a0a839b6SMarek Vasut #define ELF_CLASS ELFCLASS32 957a0a839b6SMarek Vasut #define ELF_ARCH EM_ALTERA_NIOS2 958a0a839b6SMarek Vasut 959a0a839b6SMarek Vasut static void init_thread(struct target_pt_regs *regs, struct image_info *infop) 960a0a839b6SMarek Vasut { 961a0a839b6SMarek Vasut regs->ea = infop->entry; 962a0a839b6SMarek Vasut regs->sp = infop->start_stack; 963a0a839b6SMarek Vasut regs->estatus = 0x3; 964a0a839b6SMarek Vasut } 965a0a839b6SMarek Vasut 966a0a839b6SMarek Vasut #define ELF_EXEC_PAGESIZE 4096 967a0a839b6SMarek Vasut 968a0a839b6SMarek Vasut #define USE_ELF_CORE_DUMP 969a0a839b6SMarek Vasut #define ELF_NREG 49 970a0a839b6SMarek Vasut typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 971a0a839b6SMarek Vasut 972a0a839b6SMarek Vasut /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. 
*/ 973a0a839b6SMarek Vasut static void elf_core_copy_regs(target_elf_gregset_t *regs, 974a0a839b6SMarek Vasut const CPUNios2State *env) 975a0a839b6SMarek Vasut { 976a0a839b6SMarek Vasut int i; 977a0a839b6SMarek Vasut 978a0a839b6SMarek Vasut (*regs)[0] = -1; 979a0a839b6SMarek Vasut for (i = 1; i < 8; i++) /* r0-r7 */ 980a0a839b6SMarek Vasut (*regs)[i] = tswapreg(env->regs[i + 7]); 981a0a839b6SMarek Vasut 982a0a839b6SMarek Vasut for (i = 8; i < 16; i++) /* r8-r15 */ 983a0a839b6SMarek Vasut (*regs)[i] = tswapreg(env->regs[i - 8]); 984a0a839b6SMarek Vasut 985a0a839b6SMarek Vasut for (i = 16; i < 24; i++) /* r16-r23 */ 986a0a839b6SMarek Vasut (*regs)[i] = tswapreg(env->regs[i + 7]); 987a0a839b6SMarek Vasut (*regs)[24] = -1; /* R_ET */ 988a0a839b6SMarek Vasut (*regs)[25] = -1; /* R_BT */ 989a0a839b6SMarek Vasut (*regs)[26] = tswapreg(env->regs[R_GP]); 990a0a839b6SMarek Vasut (*regs)[27] = tswapreg(env->regs[R_SP]); 991a0a839b6SMarek Vasut (*regs)[28] = tswapreg(env->regs[R_FP]); 992a0a839b6SMarek Vasut (*regs)[29] = tswapreg(env->regs[R_EA]); 993a0a839b6SMarek Vasut (*regs)[30] = -1; /* R_SSTATUS */ 994a0a839b6SMarek Vasut (*regs)[31] = tswapreg(env->regs[R_RA]); 995a0a839b6SMarek Vasut 996a0a839b6SMarek Vasut (*regs)[32] = tswapreg(env->regs[R_PC]); 997a0a839b6SMarek Vasut 998a0a839b6SMarek Vasut (*regs)[33] = -1; /* R_STATUS */ 999a0a839b6SMarek Vasut (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]); 1000a0a839b6SMarek Vasut 1001a0a839b6SMarek Vasut for (i = 35; i < 49; i++) /* ... 
*/ 1002a0a839b6SMarek Vasut (*regs)[i] = -1; 1003a0a839b6SMarek Vasut } 1004a0a839b6SMarek Vasut 1005a0a839b6SMarek Vasut #endif /* TARGET_NIOS2 */ 1006a0a839b6SMarek Vasut 1007d962783eSJia Liu #ifdef TARGET_OPENRISC 1008d962783eSJia Liu 1009d962783eSJia Liu #define ELF_START_MMAP 0x08000000 1010d962783eSJia Liu 1011d962783eSJia Liu #define ELF_ARCH EM_OPENRISC 1012d962783eSJia Liu #define ELF_CLASS ELFCLASS32 1013d962783eSJia Liu #define ELF_DATA ELFDATA2MSB 1014d962783eSJia Liu 1015d962783eSJia Liu static inline void init_thread(struct target_pt_regs *regs, 1016d962783eSJia Liu struct image_info *infop) 1017d962783eSJia Liu { 1018d962783eSJia Liu regs->pc = infop->entry; 1019d962783eSJia Liu regs->gpr[1] = infop->start_stack; 1020d962783eSJia Liu } 1021d962783eSJia Liu 1022d962783eSJia Liu #define USE_ELF_CORE_DUMP 1023d962783eSJia Liu #define ELF_EXEC_PAGESIZE 8192 1024d962783eSJia Liu 1025d962783eSJia Liu /* See linux kernel arch/openrisc/include/asm/elf.h. */ 1026d962783eSJia Liu #define ELF_NREG 34 /* gprs and pc, sr */ 1027d962783eSJia Liu typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1028d962783eSJia Liu 1029d962783eSJia Liu static void elf_core_copy_regs(target_elf_gregset_t *regs, 1030d962783eSJia Liu const CPUOpenRISCState *env) 1031d962783eSJia Liu { 1032d962783eSJia Liu int i; 1033d962783eSJia Liu 1034d962783eSJia Liu for (i = 0; i < 32; i++) { 1035d89e71e8SStafford Horne (*regs)[i] = tswapreg(cpu_get_gpr(env, i)); 1036d962783eSJia Liu } 103786cd7b2dSPaolo Bonzini (*regs)[32] = tswapreg(env->pc); 103884775c43SRichard Henderson (*regs)[33] = tswapreg(cpu_get_sr(env)); 1039d962783eSJia Liu } 1040d962783eSJia Liu #define ELF_HWCAP 0 1041d962783eSJia Liu #define ELF_PLATFORM NULL 1042d962783eSJia Liu 1043d962783eSJia Liu #endif /* TARGET_OPENRISC */ 1044d962783eSJia Liu 1045fdf9b3e8Sbellard #ifdef TARGET_SH4 1046fdf9b3e8Sbellard 1047fdf9b3e8Sbellard #define ELF_START_MMAP 0x80000000 1048fdf9b3e8Sbellard 1049fdf9b3e8Sbellard #define ELF_CLASS 
ELFCLASS32 1050fdf9b3e8Sbellard #define ELF_ARCH EM_SH 1051fdf9b3e8Sbellard 1052d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1053d97ef72eSRichard Henderson struct image_info *infop) 1054fdf9b3e8Sbellard { 1055fdf9b3e8Sbellard /* Check other registers XXXXX */ 1056fdf9b3e8Sbellard regs->pc = infop->entry; 1057072ae847Sths regs->regs[15] = infop->start_stack; 1058fdf9b3e8Sbellard } 1059fdf9b3e8Sbellard 10607631c97eSNathan Froyd /* See linux kernel: arch/sh/include/asm/elf.h. */ 10617631c97eSNathan Froyd #define ELF_NREG 23 10627631c97eSNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 10637631c97eSNathan Froyd 10647631c97eSNathan Froyd /* See linux kernel: arch/sh/include/asm/ptrace.h. */ 10657631c97eSNathan Froyd enum { 10667631c97eSNathan Froyd TARGET_REG_PC = 16, 10677631c97eSNathan Froyd TARGET_REG_PR = 17, 10687631c97eSNathan Froyd TARGET_REG_SR = 18, 10697631c97eSNathan Froyd TARGET_REG_GBR = 19, 10707631c97eSNathan Froyd TARGET_REG_MACH = 20, 10717631c97eSNathan Froyd TARGET_REG_MACL = 21, 10727631c97eSNathan Froyd TARGET_REG_SYSCALL = 22 10737631c97eSNathan Froyd }; 10747631c97eSNathan Froyd 1075d97ef72eSRichard Henderson static inline void elf_core_copy_regs(target_elf_gregset_t *regs, 107605390248SAndreas Färber const CPUSH4State *env) 10777631c97eSNathan Froyd { 10787631c97eSNathan Froyd int i; 10797631c97eSNathan Froyd 10807631c97eSNathan Froyd for (i = 0; i < 16; i++) { 108172cd500bSPhilippe Mathieu-Daudé (*regs)[i] = tswapreg(env->gregs[i]); 10827631c97eSNathan Froyd } 10837631c97eSNathan Froyd 108486cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 108586cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_PR] = tswapreg(env->pr); 108686cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_SR] = tswapreg(env->sr); 108786cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr); 108886cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_MACH] = tswapreg(env->mach); 108986cd7b2dSPaolo Bonzini 
(*regs)[TARGET_REG_MACL] = tswapreg(env->macl); 10907631c97eSNathan Froyd (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */ 10917631c97eSNathan Froyd } 10927631c97eSNathan Froyd 10937631c97eSNathan Froyd #define USE_ELF_CORE_DUMP 1094fdf9b3e8Sbellard #define ELF_EXEC_PAGESIZE 4096 1095fdf9b3e8Sbellard 1096e42fd944SRichard Henderson enum { 1097e42fd944SRichard Henderson SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */ 1098e42fd944SRichard Henderson SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */ 1099e42fd944SRichard Henderson SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */ 1100e42fd944SRichard Henderson SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */ 1101e42fd944SRichard Henderson SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */ 1102e42fd944SRichard Henderson SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */ 1103e42fd944SRichard Henderson SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */ 1104e42fd944SRichard Henderson SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */ 1105e42fd944SRichard Henderson SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */ 1106e42fd944SRichard Henderson SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */ 1107e42fd944SRichard Henderson }; 1108e42fd944SRichard Henderson 1109e42fd944SRichard Henderson #define ELF_HWCAP get_elf_hwcap() 1110e42fd944SRichard Henderson 1111e42fd944SRichard Henderson static uint32_t get_elf_hwcap(void) 1112e42fd944SRichard Henderson { 1113e42fd944SRichard Henderson SuperHCPU *cpu = SUPERH_CPU(thread_cpu); 1114e42fd944SRichard Henderson uint32_t hwcap = 0; 1115e42fd944SRichard Henderson 1116e42fd944SRichard Henderson hwcap |= SH_CPU_HAS_FPU; 1117e42fd944SRichard Henderson 1118e42fd944SRichard Henderson if (cpu->env.features & SH_FEATURE_SH4A) { 1119e42fd944SRichard Henderson hwcap |= SH_CPU_HAS_LLSC; 1120e42fd944SRichard Henderson } 1121e42fd944SRichard Henderson 1122e42fd944SRichard Henderson return hwcap; 
1123e42fd944SRichard Henderson } 1124e42fd944SRichard Henderson 1125fdf9b3e8Sbellard #endif 1126fdf9b3e8Sbellard 112748733d19Sths #ifdef TARGET_CRIS 112848733d19Sths 112948733d19Sths #define ELF_START_MMAP 0x80000000 113048733d19Sths 113148733d19Sths #define ELF_CLASS ELFCLASS32 113248733d19Sths #define ELF_ARCH EM_CRIS 113348733d19Sths 1134d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1135d97ef72eSRichard Henderson struct image_info *infop) 113648733d19Sths { 113748733d19Sths regs->erp = infop->entry; 113848733d19Sths } 113948733d19Sths 114048733d19Sths #define ELF_EXEC_PAGESIZE 8192 114148733d19Sths 114248733d19Sths #endif 114348733d19Sths 1144e6e5906bSpbrook #ifdef TARGET_M68K 1145e6e5906bSpbrook 1146e6e5906bSpbrook #define ELF_START_MMAP 0x80000000 1147e6e5906bSpbrook 1148e6e5906bSpbrook #define ELF_CLASS ELFCLASS32 1149e6e5906bSpbrook #define ELF_ARCH EM_68K 1150e6e5906bSpbrook 1151e6e5906bSpbrook /* ??? Does this need to do anything? 1152e6e5906bSpbrook #define ELF_PLAT_INIT(_r) */ 1153e6e5906bSpbrook 1154d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1155d97ef72eSRichard Henderson struct image_info *infop) 1156e6e5906bSpbrook { 1157e6e5906bSpbrook regs->usp = infop->start_stack; 1158e6e5906bSpbrook regs->sr = 0; 1159e6e5906bSpbrook regs->pc = infop->entry; 1160e6e5906bSpbrook } 1161e6e5906bSpbrook 11627a93cc55SNathan Froyd /* See linux kernel: arch/m68k/include/asm/elf.h. 
*/ 11637a93cc55SNathan Froyd #define ELF_NREG 20 11647a93cc55SNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 11657a93cc55SNathan Froyd 116605390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env) 11677a93cc55SNathan Froyd { 116886cd7b2dSPaolo Bonzini (*regs)[0] = tswapreg(env->dregs[1]); 116986cd7b2dSPaolo Bonzini (*regs)[1] = tswapreg(env->dregs[2]); 117086cd7b2dSPaolo Bonzini (*regs)[2] = tswapreg(env->dregs[3]); 117186cd7b2dSPaolo Bonzini (*regs)[3] = tswapreg(env->dregs[4]); 117286cd7b2dSPaolo Bonzini (*regs)[4] = tswapreg(env->dregs[5]); 117386cd7b2dSPaolo Bonzini (*regs)[5] = tswapreg(env->dregs[6]); 117486cd7b2dSPaolo Bonzini (*regs)[6] = tswapreg(env->dregs[7]); 117586cd7b2dSPaolo Bonzini (*regs)[7] = tswapreg(env->aregs[0]); 117686cd7b2dSPaolo Bonzini (*regs)[8] = tswapreg(env->aregs[1]); 117786cd7b2dSPaolo Bonzini (*regs)[9] = tswapreg(env->aregs[2]); 117886cd7b2dSPaolo Bonzini (*regs)[10] = tswapreg(env->aregs[3]); 117986cd7b2dSPaolo Bonzini (*regs)[11] = tswapreg(env->aregs[4]); 118086cd7b2dSPaolo Bonzini (*regs)[12] = tswapreg(env->aregs[5]); 118186cd7b2dSPaolo Bonzini (*regs)[13] = tswapreg(env->aregs[6]); 118286cd7b2dSPaolo Bonzini (*regs)[14] = tswapreg(env->dregs[0]); 118386cd7b2dSPaolo Bonzini (*regs)[15] = tswapreg(env->aregs[7]); 118486cd7b2dSPaolo Bonzini (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */ 118586cd7b2dSPaolo Bonzini (*regs)[17] = tswapreg(env->sr); 118686cd7b2dSPaolo Bonzini (*regs)[18] = tswapreg(env->pc); 11877a93cc55SNathan Froyd (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */ 11887a93cc55SNathan Froyd } 11897a93cc55SNathan Froyd 11907a93cc55SNathan Froyd #define USE_ELF_CORE_DUMP 1191e6e5906bSpbrook #define ELF_EXEC_PAGESIZE 8192 1192e6e5906bSpbrook 1193e6e5906bSpbrook #endif 1194e6e5906bSpbrook 11957a3148a9Sj_mayer #ifdef TARGET_ALPHA 11967a3148a9Sj_mayer 11977a3148a9Sj_mayer #define ELF_START_MMAP (0x30000000000ULL) 
11987a3148a9Sj_mayer 11997a3148a9Sj_mayer #define ELF_CLASS ELFCLASS64 12007a3148a9Sj_mayer #define ELF_ARCH EM_ALPHA 12017a3148a9Sj_mayer 1202d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1203d97ef72eSRichard Henderson struct image_info *infop) 12047a3148a9Sj_mayer { 12057a3148a9Sj_mayer regs->pc = infop->entry; 12067a3148a9Sj_mayer regs->ps = 8; 12077a3148a9Sj_mayer regs->usp = infop->start_stack; 12087a3148a9Sj_mayer } 12097a3148a9Sj_mayer 12107a3148a9Sj_mayer #define ELF_EXEC_PAGESIZE 8192 12117a3148a9Sj_mayer 12127a3148a9Sj_mayer #endif /* TARGET_ALPHA */ 12137a3148a9Sj_mayer 1214a4c075f1SUlrich Hecht #ifdef TARGET_S390X 1215a4c075f1SUlrich Hecht 1216a4c075f1SUlrich Hecht #define ELF_START_MMAP (0x20000000000ULL) 1217a4c075f1SUlrich Hecht 1218a4c075f1SUlrich Hecht #define ELF_CLASS ELFCLASS64 1219a4c075f1SUlrich Hecht #define ELF_DATA ELFDATA2MSB 1220a4c075f1SUlrich Hecht #define ELF_ARCH EM_S390 1221a4c075f1SUlrich Hecht 1222a4c075f1SUlrich Hecht static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1223a4c075f1SUlrich Hecht { 1224a4c075f1SUlrich Hecht regs->psw.addr = infop->entry; 1225a4c075f1SUlrich Hecht regs->psw.mask = PSW_MASK_64 | PSW_MASK_32; 1226a4c075f1SUlrich Hecht regs->gprs[15] = infop->start_stack; 1227a4c075f1SUlrich Hecht } 1228a4c075f1SUlrich Hecht 1229a4c075f1SUlrich Hecht #endif /* TARGET_S390X */ 1230a4c075f1SUlrich Hecht 1231b16189b2SChen Gang #ifdef TARGET_TILEGX 1232b16189b2SChen Gang 1233b16189b2SChen Gang /* 42 bits real used address, a half for user mode */ 1234b16189b2SChen Gang #define ELF_START_MMAP (0x00000020000000000ULL) 1235b16189b2SChen Gang 1236b16189b2SChen Gang #define elf_check_arch(x) ((x) == EM_TILEGX) 1237b16189b2SChen Gang 1238b16189b2SChen Gang #define ELF_CLASS ELFCLASS64 1239b16189b2SChen Gang #define ELF_DATA ELFDATA2LSB 1240b16189b2SChen Gang #define ELF_ARCH EM_TILEGX 1241b16189b2SChen Gang 1242b16189b2SChen Gang static inline void 
/* TILE-Gx: tail of init_thread() (signature opens before this chunk) --
   start the new thread at the image entry point with sp at the stack top. */
init_thread(struct target_pt_regs *regs,
            struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->sp = infop->start_stack;

}

#define ELF_EXEC_PAGESIZE 65536 /* TILE-Gx page size is 64KB */

#endif /* TARGET_TILEGX */

#ifdef TARGET_RISCV

#define ELF_START_MMAP 0x80000000
#define ELF_ARCH  EM_RISCV

/* RV32 and RV64 share EM_RISCV; only the ELF class differs. */
#ifdef TARGET_RISCV32
#define ELF_CLASS ELFCLASS32
#else
#define ELF_CLASS ELFCLASS64
#endif

/* Set up the initial CPU state for a new RISC-V guest thread:
   sepc holds the entry point, sp the top of the freshly built stack. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_RISCV */

#ifdef TARGET_HPPA

#define ELF_START_MMAP  0x80000000
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_PARISC
#define ELF_PLATFORM    "PARISC"
/* HPPA is the one target here whose stack grows upward, with a
   64-byte stack alignment/linkage convention. */
#define STACK_GROWS_DOWN 0
#define STACK_ALIGNMENT  64

/* Set up the initial CPU state for a new HPPA guest thread.
   iaoq[0]/iaoq[1] form the instruction-address front/back queue,
   so both must be primed (entry and entry + 4). */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->iaoq[0] = infop->entry;
    regs->iaoq[1] = infop->entry + 4;
    regs->gr[23] = 0;
    /* gr[24] points at the argument block; gr[25] is the number of
       abi_ulong slots between arg_start and arg_end. */
    regs->gr[24] = infop->arg_start;
    regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong);
    /* The top-of-stack contains a linkage buffer.  */
    regs->gr[30] = infop->start_stack + 64;
    regs->gr[31] = infop->entry;
}

#endif /* TARGET_HPPA */

#ifdef TARGET_XTENSA

#define ELF_START_MMAP 0x20000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA

/* Set up the initial CPU state for a new Xtensa guest thread:
   reset the register window (base 0, start bit 1), put the stack
   pointer in a1 (areg[1]) and start at the image entry point. */
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->windowbase = 0;
    regs->windowstart = 1;
    regs->areg[1] = infop->start_stack;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/xtensa/include/asm/elf.h.  */
#define ELF_NREG 128
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* Slot indices within the core-dump register block; the layout is
   intended to match the kernel header referenced above, with the
   address registers starting at slot 64. */
enum {
    TARGET_REG_PC,
    TARGET_REG_PS,
    TARGET_REG_LBEG,
    TARGET_REG_LEND,
    TARGET_REG_LCOUNT,
    TARGET_REG_SAR,
    TARGET_REG_WINDOWSTART,
    TARGET_REG_WINDOWBASE,
    TARGET_REG_THREADPTR,
    TARGET_REG_AR0 = 64,
};

/* Fill REGS (in target byte order, via tswapreg) from ENV for a core
   dump.  PS is recorded with the exception-mode bit cleared, and the
   windowed registers are synced into phys_regs before being copied. */
static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUXtensaState *env)
{
    unsigned i;

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
    (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
    (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
    (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
    (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
    (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
    (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
    (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
    /* Cast away const: the sync helper mutates env's phys_regs view. */
    xtensa_sync_phys_from_window((CPUXtensaState *)env);
    for (i = 0; i < env->config->nareg; ++i) {
        (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
    }
}
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_XTENSA */

/* Fallback definitions for targets that did not provide their own
   values in the per-target sections above. */

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_MACHINE
#define ELF_MACHINE ELF_ARCH
#endif

#ifndef elf_check_arch
#define elf_check_arch(x) ((x) == ELF_ARCH)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifndef STACK_GROWS_DOWN
#define STACK_GROWS_DOWN 1
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT 16
#endif

/* A 32-bit ABI on a 64-bit target still uses 32-bit ELF structures. */
#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#include "elf.h"

/* Legacy a.out executable header, kept for the a.out loading path. */
struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};


/* a.out magic numbers (octal), extracted from the low 16 bits of a_info. */
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & \
                                  ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

/* Number of NEW_AUX_ENT pairs unconditionally emitted by
   create_elf_tables(); asserted against there. */
#define DLINFO_ITEMS 15

/* Kernel-heritage name: in the original Linux code this copied from the
   user segment; here it is a plain memcpy. */
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

#ifdef BSWAP_NEEDED
/* Byte-swap every field of the ELF file header in place (used when the
   guest and host differ in endianness). */
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

/* Byte-swap PHNUM program headers in place. */
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;
    for (i = 0; i < phnum; ++i, ++phdr) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}

/* Byte-swap SHNUM section headers in place. */
static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;
    for (i = 0; i < shnum; ++i, ++shdr) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

/* Byte-swap one symbol table entry in place. */
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#else
/* Same-endian build: swapping is a no-op. */
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Copies ARGC strings from ARGV into guest memory at P, buffering
 * through SCRATCH (one target page) to batch memcpy_to_target calls.
 * For a downward-growing stack the strings are written last-to-first
 * ending at P; for an upward-growing stack, first-to-last starting
 * at P.  Returns the updated P, or 0 if the strings would cross
 * STACK_LIMIT (or P was 0).
 */
static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
                                  abi_ulong p, abi_ulong stack_limit)
{
    char *tmp;
    int len, i;
    abi_ulong top = p;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    if (STACK_GROWS_DOWN) {
        /* offset = bytes still free in the scratch page, counted from
           its end; seeded from P's position within its target page. */
        int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
        for (i = argc - 1; i >= 0; --i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            /* Copy backwards, so start from one past the terminator. */
            tmp += len;

            if (len > (p - stack_limit)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;

                memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);

                /* Scratch page full: flush [p, top) to the guest. */
                if (offset == 0) {
                    memcpy_to_target(p, scratch, top - p);
                    top = p;
                    offset = TARGET_PAGE_SIZE;
                }
            }
        }
        /* Flush any partially filled scratch page. */
        if (p != top) {
            memcpy_to_target(p, scratch + offset, top - p);
        }
    } else {
        /* Upward stack: remaining = bytes left in the current page. */
        int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
        for (i = 0; i < argc; ++i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            if (len > (stack_limit - p)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > remaining) ? remaining : len;

                memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);

                tmp += bytes_to_copy;
                remaining -= bytes_to_copy;
                p += bytes_to_copy;
                len -= bytes_to_copy;

                /* Scratch page full: flush [top, p) to the guest. */
                if (remaining == 0) {
                    memcpy_to_target(top, scratch, p - top);
                    top = p;
                    remaining = TARGET_PAGE_SIZE;
                }
            }
        }
        /* Flush any partially filled scratch page. */
        if (p != top) {
            memcpy_to_target(top, scratch, p - top);
        }
    }

    return p;
}

/* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of
 * argument/environment space. Newer kernels (>2.6.33) allow more,
 * dependent on stack size, but guarantee at least 32 pages for
 * backwards compatibility.
 */
#define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)

/* Allocate the guest stack (guest_stack_size, floored at
 * STACK_LOWER_LIMIT) plus one guard region, mark the guard
 * inaccessible, record the usable limit in INFO->stack_limit and
 * return the initial stack pointer.  Exits the process on mmap
 * failure.
 */
static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong size, error, guard;

    size = guest_stack_size;
    if (size < STACK_LOWER_LIMIT) {
        size = STACK_LOWER_LIMIT;
    }
    /* The guard must cover at least one host page, or mprotect below
       would leave part of a host page accessible. */
    guard = TARGET_PAGE_SIZE;
    if (guard < qemu_real_host_page_size) {
        guard = qemu_real_host_page_size;
    }

    error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (error == -1) {
        perror("mmap stack");
        exit(-1);
    }

    /* We reserve one extra page at the top of the stack as guard.  */
    if (STACK_GROWS_DOWN) {
        /* Guard sits below the stack; sp starts near the high end. */
        target_mprotect(error, guard, PROT_NONE);
        info->stack_limit = error + guard;
        return info->stack_limit + size - sizeof(void *);
    } else {
        /* Guard sits above the stack; sp starts at the low end. */
        target_mprotect(error + size, guard, PROT_NONE);
        info->stack_limit = error + size;
        return error;
    }
}

/* Map and zero the bss.  We need to explicitly zero any fractional pages
   after the data section (i.e. bss).
 *
 * ELF_BSS is the (guest) end of the file-backed data; LAST_BSS the
 * (guest) end of the bss.  Anonymous pages are mapped for the whole
 * host pages in between, and the fractional host page at the start is
 * zeroed by hand since it is shared with the file mapping.
 */
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
{
    uintptr_t host_start, host_map_start, host_end;

    last_bss = TARGET_PAGE_ALIGN(last_bss);

    /* ??? There is confusion between qemu_real_host_page_size and
       qemu_host_page_size here and elsewhere in target_mmap, which
       may lead to the end of the data section mapping from the file
       not being mapped.  At least there was an explicit test and
       comment for that here, suggesting that "the file size must
       be known".  The comment probably pre-dates the introduction
       of the fstat system call in target_mmap which does in fact
       find out the size.  What isn't clear is if the workaround
       here is still actually needed.  For now, continue with it,
       but merge it with the "normal" mmap that would allocate the bss.  */

    /* Work in host addresses: the first whole host page at or after
       elf_bss is where the anonymous mapping can start. */
    host_start = (uintptr_t) g2h(elf_bss);
    host_end = (uintptr_t) g2h(last_bss);
    host_map_start = REAL_HOST_PAGE_ALIGN(host_start);

    if (host_map_start < host_end) {
        void *p = mmap((void *)host_map_start, host_end - host_map_start,
                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("cannot mmap brk");
            exit(-1);
        }
    }

    /* Ensure that the bss page(s) are valid */
    if ((page_get_flags(last_bss-1) & prot) != prot) {
        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
    }

    /* Zero the fraction of the host page shared with the file mapping. */
    if (host_start < host_map_start) {
        memset((void *)host_start, 0, host_map_start - host_start);
    }
}

#ifdef TARGET_ARM
/* On ARM, FDPIC binaries are flagged via a dedicated EI_OSABI value. */
static int elf_is_fdpic(struct elfhdr *exec)
{
    return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
}
#else
/* Default implementation, always false.
 */
static int elf_is_fdpic(struct elfhdr *exec)
{
    return 0;
}
#endif

/* Push an elf32_fdpic_loadmap (version word, nsegs, then one 12-byte
   entry per load segment, written highest-index first) onto the guest
   stack at SP.  Records the map address and FDPIC personality in INFO
   and returns the new SP. */
static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
{
    uint16_t n;
    struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;

    /* elf32_fdpic_loadseg */
    n = info->nsegs;
    while (n--) {
        sp -= 12;
        put_user_u32(loadsegs[n].addr, sp+0);
        put_user_u32(loadsegs[n].p_vaddr, sp+4);
        put_user_u32(loadsegs[n].p_memsz, sp+8);
    }

    /* elf32_fdpic_loadmap */
    sp -= 4;
    put_user_u16(0, sp+0);                 /* version */
    put_user_u16(info->nsegs, sp+2);       /* nsegs */

    info->personality = PER_LINUX_FDPIC;
    info->loadmap_addr = sp;

    return sp;
}

/* Build the initial guest stack image: optional FDPIC load maps, the
 * platform string, 16 bytes of AT_RANDOM seed, then argc, the argv and
 * envp pointer arrays and the ELF auxiliary vector, handling both stack
 * growth directions.  P is the current stack pointer (strings already
 * copied); returns the finished, STACK_ALIGNMENT-aligned stack pointer
 * to hand to the guest.
 */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   struct image_info *info,
                                   struct image_info *interp_info)
{
    abi_ulong sp;
    abi_ulong u_argc, u_argv, u_envp, u_auxv;
    int size;
    int i;
    abi_ulong u_rand_bytes;
    uint8_t k_rand_bytes[16];
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);   /* size of one stack slot */

    sp = p;

    /* Needs to be before we load the env/argc/... */
    if (elf_is_fdpic(exec)) {
        /* Need 4 byte alignment for these structs */
        sp &= ~3;
        sp = loader_build_fdpic_loadmap(info, sp);
        info->other_info = interp_info;
        if (interp_info) {
            interp_info->other_info = info;
            sp = loader_build_fdpic_loadmap(interp_info, sp);
        }
    }

    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        if (STACK_GROWS_DOWN) {
            /* Keep sp slot-aligned while making room for the string. */
            sp -= (len + n - 1) & ~(n - 1);
            u_platform = sp;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(sp, k_platform, len);
        } else {
            memcpy_to_target(sp, k_platform, len);
            u_platform = sp;
            sp += len + 1;
        }
    }

    /* Provide 16 byte alignment for the PRNG, and basic alignment for
     * the argv and envp pointers.
     */
    if (STACK_GROWS_DOWN) {
        sp = QEMU_ALIGN_DOWN(sp, 16);
    } else {
        sp = QEMU_ALIGN_UP(sp, 16);
    }

    /*
     * Generate 16 random bytes for userspace PRNG seeding (not
     * cryptographically secure but it's not the aim of QEMU).
     */
    for (i = 0; i < 16; i++) {
        k_rand_bytes[i] = rand();
    }
    if (STACK_GROWS_DOWN) {
        sp -= 16;
        u_rand_bytes = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_rand_bytes, 16);
    } else {
        memcpy_to_target(sp, k_rand_bytes, 16);
        u_rand_bytes = sp;
        sp += 16;
    }

    /* Count auxv slots first (2 per entry, +1 entry for AT_NULL)... */
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
#ifdef ELF_HWCAP2
    size += 2;
#endif
    /* Recorded now and asserted against after the entries are written. */
    info->auxv_len = size * n;

    /* ...then add the argv/envp pointer arrays (each NULL-terminated). */
    size += envc + argc + 2;
    size += 1;  /* argc itself */
    size *= n;

    /* Allocate space and finalize stack alignment for entry now.  */
    if (STACK_GROWS_DOWN) {
        u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
        sp = u_argc;
    } else {
        u_argc = sp;
        sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
    }

    /* Layout, low to high: argc | argv[] NULL | envp[] NULL | auxv. */
    u_argv = u_argc + n;
    u_envp = u_argv + (argc + 1) * n;
    u_auxv = u_envp + (envc + 1) * n;
    info->saved_auxv = u_auxv;
    info->arg_start = u_argv;
    info->arg_end = u_argv + argc * n;

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        put_user_ual(id, u_auxv);  u_auxv += n; \
        put_user_ual(val, u_auxv); u_auxv += n; \
    } while(0)

#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come first so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
    /* There must be exactly DLINFO_ITEMS entries here, or the assert
     * on info->auxv_len will trigger.
     */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, getpagesize())));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, info->entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
    NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));

#ifdef ELF_HWCAP2
    NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
#endif

    if (u_platform) {
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
    }
    NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT

    /* Check that our initial calculation of the auxv length matches how much
     * we actually put into it.
     */
    assert(info->auxv_len == u_auxv - info->saved_auxv);

    put_user_ual(argc, u_argc);

    /* The strings were already copied to the guest by copy_elf_strings;
       walk them again to fill in the argv pointer array. */
    p = info->arg_strings;
    for (i = 0; i < argc; ++i) {
        put_user_ual(p, u_argv);
        u_argv += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_argv);

    /* Likewise for envp. */
    p = info->env_strings;
    for (i = 0; i < envc; ++i) {
        put_user_ual(p, u_envp);
        u_envp += n;
        p += target_strlen(p) + 1;
    }
    put_user_ual(0, u_envp);

    return sp;
}

unsigned long init_guest_space(unsigned long host_start,
                               unsigned long host_size,
                               unsigned long guest_start,
                               bool fixed)
{
    unsigned long current_start, aligned_start;
    int flags;

    assert(host_start || host_size);

    /* If just a starting address is given, then just verify that
     * address.
*/ 1907dce10401SMeador Inge if (host_start && !host_size) { 19088756e136SLuke Shumaker #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) 1909c3637eafSLuke Shumaker if (init_guest_commpage(host_start, host_size) != 1) { 1910dce10401SMeador Inge return (unsigned long)-1; 1911dce10401SMeador Inge } 19128756e136SLuke Shumaker #endif 19138756e136SLuke Shumaker return host_start; 1914dce10401SMeador Inge } 1915dce10401SMeador Inge 1916dce10401SMeador Inge /* Setup the initial flags and start address. */ 1917dce10401SMeador Inge current_start = host_start & qemu_host_page_mask; 1918dce10401SMeador Inge flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE; 1919dce10401SMeador Inge if (fixed) { 1920dce10401SMeador Inge flags |= MAP_FIXED; 1921dce10401SMeador Inge } 1922dce10401SMeador Inge 1923dce10401SMeador Inge /* Otherwise, a non-zero size region of memory needs to be mapped 1924dce10401SMeador Inge * and validated. */ 19252a53535aSLuke Shumaker 19262a53535aSLuke Shumaker #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) 19272a53535aSLuke Shumaker /* On 32-bit ARM, we need to map not just the usable memory, but 19282a53535aSLuke Shumaker * also the commpage. Try to find a suitable place by allocating 19292a53535aSLuke Shumaker * a big chunk for all of it. If host_start, then the naive 19302a53535aSLuke Shumaker * strategy probably does good enough. 
19312a53535aSLuke Shumaker */ 19322a53535aSLuke Shumaker if (!host_start) { 19332a53535aSLuke Shumaker unsigned long guest_full_size, host_full_size, real_start; 19342a53535aSLuke Shumaker 19352a53535aSLuke Shumaker guest_full_size = 19362a53535aSLuke Shumaker (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size; 19372a53535aSLuke Shumaker host_full_size = guest_full_size - guest_start; 19382a53535aSLuke Shumaker real_start = (unsigned long) 19392a53535aSLuke Shumaker mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0); 19402a53535aSLuke Shumaker if (real_start == (unsigned long)-1) { 19412a53535aSLuke Shumaker if (host_size < host_full_size - qemu_host_page_size) { 19422a53535aSLuke Shumaker /* We failed to map a continous segment, but we're 19432a53535aSLuke Shumaker * allowed to have a gap between the usable memory and 19442a53535aSLuke Shumaker * the commpage where other things can be mapped. 19452a53535aSLuke Shumaker * This sparseness gives us more flexibility to find 19462a53535aSLuke Shumaker * an address range. 19472a53535aSLuke Shumaker */ 19482a53535aSLuke Shumaker goto naive; 19492a53535aSLuke Shumaker } 19502a53535aSLuke Shumaker return (unsigned long)-1; 19512a53535aSLuke Shumaker } 19522a53535aSLuke Shumaker munmap((void *)real_start, host_full_size); 19532a53535aSLuke Shumaker if (real_start & ~qemu_host_page_mask) { 19542a53535aSLuke Shumaker /* The same thing again, but with an extra qemu_host_page_size 19552a53535aSLuke Shumaker * so that we can shift around alignment. 
19562a53535aSLuke Shumaker */ 19572a53535aSLuke Shumaker unsigned long real_size = host_full_size + qemu_host_page_size; 19582a53535aSLuke Shumaker real_start = (unsigned long) 19592a53535aSLuke Shumaker mmap(NULL, real_size, PROT_NONE, flags, -1, 0); 19602a53535aSLuke Shumaker if (real_start == (unsigned long)-1) { 19612a53535aSLuke Shumaker if (host_size < host_full_size - qemu_host_page_size) { 19622a53535aSLuke Shumaker goto naive; 19632a53535aSLuke Shumaker } 19642a53535aSLuke Shumaker return (unsigned long)-1; 19652a53535aSLuke Shumaker } 19662a53535aSLuke Shumaker munmap((void *)real_start, real_size); 19672a53535aSLuke Shumaker real_start = HOST_PAGE_ALIGN(real_start); 19682a53535aSLuke Shumaker } 19692a53535aSLuke Shumaker current_start = real_start; 19702a53535aSLuke Shumaker } 19712a53535aSLuke Shumaker naive: 19722a53535aSLuke Shumaker #endif 19732a53535aSLuke Shumaker 1974dce10401SMeador Inge while (1) { 1975293f2060SLuke Shumaker unsigned long real_start, real_size, aligned_size; 1976293f2060SLuke Shumaker aligned_size = real_size = host_size; 1977806d1021SMeador Inge 1978dce10401SMeador Inge /* Do not use mmap_find_vma here because that is limited to the 1979dce10401SMeador Inge * guest address space. We are going to make the 1980dce10401SMeador Inge * guest address space fit whatever we're given. 1981dce10401SMeador Inge */ 1982dce10401SMeador Inge real_start = (unsigned long) 1983dce10401SMeador Inge mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0); 1984dce10401SMeador Inge if (real_start == (unsigned long)-1) { 1985dce10401SMeador Inge return (unsigned long)-1; 1986dce10401SMeador Inge } 1987dce10401SMeador Inge 1988aac362e4SLuke Shumaker /* Check to see if the address is valid. */ 1989aac362e4SLuke Shumaker if (host_start && real_start != current_start) { 1990aac362e4SLuke Shumaker goto try_again; 1991aac362e4SLuke Shumaker } 1992aac362e4SLuke Shumaker 1993806d1021SMeador Inge /* Ensure the address is properly aligned. 
*/ 1994806d1021SMeador Inge if (real_start & ~qemu_host_page_mask) { 1995293f2060SLuke Shumaker /* Ideally, we adjust like 1996293f2060SLuke Shumaker * 1997293f2060SLuke Shumaker * pages: [ ][ ][ ][ ][ ] 1998293f2060SLuke Shumaker * old: [ real ] 1999293f2060SLuke Shumaker * [ aligned ] 2000293f2060SLuke Shumaker * new: [ real ] 2001293f2060SLuke Shumaker * [ aligned ] 2002293f2060SLuke Shumaker * 2003293f2060SLuke Shumaker * But if there is something else mapped right after it, 2004293f2060SLuke Shumaker * then obviously it won't have room to grow, and the 2005293f2060SLuke Shumaker * kernel will put the new larger real someplace else with 2006293f2060SLuke Shumaker * unknown alignment (if we made it to here, then 2007293f2060SLuke Shumaker * fixed=false). Which is why we grow real by a full page 2008293f2060SLuke Shumaker * size, instead of by part of one; so that even if we get 2009293f2060SLuke Shumaker * moved, we can still guarantee alignment. But this does 2010293f2060SLuke Shumaker * mean that there is a padding of < 1 page both before 2011293f2060SLuke Shumaker * and after the aligned range; the "after" could could 2012293f2060SLuke Shumaker * cause problems for ARM emulation where it could butt in 2013293f2060SLuke Shumaker * to where we need to put the commpage. 
2014293f2060SLuke Shumaker */ 2015806d1021SMeador Inge munmap((void *)real_start, host_size); 2016293f2060SLuke Shumaker real_size = aligned_size + qemu_host_page_size; 2017806d1021SMeador Inge real_start = (unsigned long) 2018806d1021SMeador Inge mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0); 2019806d1021SMeador Inge if (real_start == (unsigned long)-1) { 2020806d1021SMeador Inge return (unsigned long)-1; 2021806d1021SMeador Inge } 2022293f2060SLuke Shumaker aligned_start = HOST_PAGE_ALIGN(real_start); 2023293f2060SLuke Shumaker } else { 2024293f2060SLuke Shumaker aligned_start = real_start; 2025806d1021SMeador Inge } 2026806d1021SMeador Inge 20278756e136SLuke Shumaker #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) 20288756e136SLuke Shumaker /* On 32-bit ARM, we need to also be able to map the commpage. */ 2029293f2060SLuke Shumaker int valid = init_guest_commpage(aligned_start - guest_start, 2030293f2060SLuke Shumaker aligned_size + guest_start); 20317ad75eeaSLuke Shumaker if (valid == -1) { 2032293f2060SLuke Shumaker munmap((void *)real_start, real_size); 2033806d1021SMeador Inge return (unsigned long)-1; 20347ad75eeaSLuke Shumaker } else if (valid == 0) { 20357ad75eeaSLuke Shumaker goto try_again; 2036806d1021SMeador Inge } 20378756e136SLuke Shumaker #endif 2038dce10401SMeador Inge 20397ad75eeaSLuke Shumaker /* If nothing has said `return -1` or `goto try_again` yet, 20407ad75eeaSLuke Shumaker * then the address we have is good. 20417ad75eeaSLuke Shumaker */ 20427ad75eeaSLuke Shumaker break; 20437ad75eeaSLuke Shumaker 20447ad75eeaSLuke Shumaker try_again: 2045dce10401SMeador Inge /* That address didn't work. Unmap and try a different one. 2046dce10401SMeador Inge * The address the host picked because is typically right at 2047dce10401SMeador Inge * the top of the host address space and leaves the guest with 2048dce10401SMeador Inge * no usable address space. Resort to a linear search. 
We 2049dce10401SMeador Inge * already compensated for mmap_min_addr, so this should not 2050dce10401SMeador Inge * happen often. Probably means we got unlucky and host 2051dce10401SMeador Inge * address space randomization put a shared library somewhere 2052dce10401SMeador Inge * inconvenient. 20538c17d862SLuke Shumaker * 20548c17d862SLuke Shumaker * This is probably a good strategy if host_start, but is 20558c17d862SLuke Shumaker * probably a bad strategy if not, which means we got here 20568c17d862SLuke Shumaker * because of trouble with ARM commpage setup. 2057dce10401SMeador Inge */ 2058293f2060SLuke Shumaker munmap((void *)real_start, real_size); 2059dce10401SMeador Inge current_start += qemu_host_page_size; 2060dce10401SMeador Inge if (host_start == current_start) { 2061dce10401SMeador Inge /* Theoretically possible if host doesn't have any suitably 2062dce10401SMeador Inge * aligned areas. Normally the first mmap will fail. 2063dce10401SMeador Inge */ 2064dce10401SMeador Inge return (unsigned long)-1; 2065dce10401SMeador Inge } 2066dce10401SMeador Inge } 2067dce10401SMeador Inge 206813829020SPaolo Bonzini qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size); 2069806d1021SMeador Inge 2070293f2060SLuke Shumaker return aligned_start; 2071dce10401SMeador Inge } 2072dce10401SMeador Inge 2073f3ed1f5dSPeter Maydell static void probe_guest_base(const char *image_name, 2074f3ed1f5dSPeter Maydell abi_ulong loaddr, abi_ulong hiaddr) 2075f3ed1f5dSPeter Maydell { 2076f3ed1f5dSPeter Maydell /* Probe for a suitable guest base address, if the user has not set 2077f3ed1f5dSPeter Maydell * it explicitly, and set guest_base appropriately. 2078f3ed1f5dSPeter Maydell * In case of error we will print a suitable message and exit. 
2079f3ed1f5dSPeter Maydell */ 2080f3ed1f5dSPeter Maydell const char *errmsg; 2081f3ed1f5dSPeter Maydell if (!have_guest_base && !reserved_va) { 2082f3ed1f5dSPeter Maydell unsigned long host_start, real_start, host_size; 2083f3ed1f5dSPeter Maydell 2084f3ed1f5dSPeter Maydell /* Round addresses to page boundaries. */ 2085f3ed1f5dSPeter Maydell loaddr &= qemu_host_page_mask; 2086f3ed1f5dSPeter Maydell hiaddr = HOST_PAGE_ALIGN(hiaddr); 2087f3ed1f5dSPeter Maydell 2088f3ed1f5dSPeter Maydell if (loaddr < mmap_min_addr) { 2089f3ed1f5dSPeter Maydell host_start = HOST_PAGE_ALIGN(mmap_min_addr); 2090f3ed1f5dSPeter Maydell } else { 2091f3ed1f5dSPeter Maydell host_start = loaddr; 2092f3ed1f5dSPeter Maydell if (host_start != loaddr) { 2093f3ed1f5dSPeter Maydell errmsg = "Address overflow loading ELF binary"; 2094f3ed1f5dSPeter Maydell goto exit_errmsg; 2095f3ed1f5dSPeter Maydell } 2096f3ed1f5dSPeter Maydell } 2097f3ed1f5dSPeter Maydell host_size = hiaddr - loaddr; 2098dce10401SMeador Inge 2099dce10401SMeador Inge /* Setup the initial guest memory space with ranges gleaned from 2100dce10401SMeador Inge * the ELF image that is being loaded. 
2101dce10401SMeador Inge */ 2102dce10401SMeador Inge real_start = init_guest_space(host_start, host_size, loaddr, false); 2103f3ed1f5dSPeter Maydell if (real_start == (unsigned long)-1) { 2104f3ed1f5dSPeter Maydell errmsg = "Unable to find space for application"; 2105f3ed1f5dSPeter Maydell goto exit_errmsg; 2106f3ed1f5dSPeter Maydell } 2107dce10401SMeador Inge guest_base = real_start - loaddr; 2108dce10401SMeador Inge 210913829020SPaolo Bonzini qemu_log_mask(CPU_LOG_PAGE, "Relocating guest address space from 0x" 2110f3ed1f5dSPeter Maydell TARGET_ABI_FMT_lx " to 0x%lx\n", 2111f3ed1f5dSPeter Maydell loaddr, real_start); 2112f3ed1f5dSPeter Maydell } 2113f3ed1f5dSPeter Maydell return; 2114f3ed1f5dSPeter Maydell 2115f3ed1f5dSPeter Maydell exit_errmsg: 2116f3ed1f5dSPeter Maydell fprintf(stderr, "%s: %s\n", image_name, errmsg); 2117f3ed1f5dSPeter Maydell exit(-1); 2118f3ed1f5dSPeter Maydell } 2119f3ed1f5dSPeter Maydell 2120f3ed1f5dSPeter Maydell 21218e62a717SRichard Henderson /* Load an ELF image into the address space. 212231e31b8aSbellard 21238e62a717SRichard Henderson IMAGE_NAME is the filename of the image, to use in error messages. 21248e62a717SRichard Henderson IMAGE_FD is the open file descriptor for the image. 21258e62a717SRichard Henderson 21268e62a717SRichard Henderson BPRM_BUF is a copy of the beginning of the file; this of course 21278e62a717SRichard Henderson contains the elf file header at offset 0. It is assumed that this 21288e62a717SRichard Henderson buffer is sufficiently aligned to present no problems to the host 21298e62a717SRichard Henderson in accessing data at aligned offsets within the buffer. 21308e62a717SRichard Henderson 21318e62a717SRichard Henderson On return: INFO values will be filled in, as necessary or available. 
*/ 21328e62a717SRichard Henderson 21338e62a717SRichard Henderson static void load_elf_image(const char *image_name, int image_fd, 2134bf858897SRichard Henderson struct image_info *info, char **pinterp_name, 21359955ffacSRichard Henderson char bprm_buf[BPRM_BUF_SIZE]) 213631e31b8aSbellard { 21378e62a717SRichard Henderson struct elfhdr *ehdr = (struct elfhdr *)bprm_buf; 21388e62a717SRichard Henderson struct elf_phdr *phdr; 21398e62a717SRichard Henderson abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 21408e62a717SRichard Henderson int i, retval; 21418e62a717SRichard Henderson const char *errmsg; 214231e31b8aSbellard 21438e62a717SRichard Henderson /* First of all, some simple consistency checks */ 21448e62a717SRichard Henderson errmsg = "Invalid ELF image for this architecture"; 21458e62a717SRichard Henderson if (!elf_check_ident(ehdr)) { 21468e62a717SRichard Henderson goto exit_errmsg; 21478e62a717SRichard Henderson } 21488e62a717SRichard Henderson bswap_ehdr(ehdr); 21498e62a717SRichard Henderson if (!elf_check_ehdr(ehdr)) { 21508e62a717SRichard Henderson goto exit_errmsg; 215131e31b8aSbellard } 215231e31b8aSbellard 21538e62a717SRichard Henderson i = ehdr->e_phnum * sizeof(struct elf_phdr); 21548e62a717SRichard Henderson if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) { 21558e62a717SRichard Henderson phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff); 21569955ffacSRichard Henderson } else { 21578e62a717SRichard Henderson phdr = (struct elf_phdr *) alloca(i); 21588e62a717SRichard Henderson retval = pread(image_fd, phdr, i, ehdr->e_phoff); 21599955ffacSRichard Henderson if (retval != i) { 21608e62a717SRichard Henderson goto exit_read; 21619955ffacSRichard Henderson } 216231e31b8aSbellard } 21638e62a717SRichard Henderson bswap_phdr(phdr, ehdr->e_phnum); 216409bfb054Sbellard 21651af02e83SMike Frysinger info->nsegs = 0; 21661af02e83SMike Frysinger info->pt_dynamic_addr = 0; 21671af02e83SMike Frysinger 216898c1076cSAlex Bennée mmap_lock(); 216998c1076cSAlex Bennée 
2170682674b8SRichard Henderson /* Find the maximum size of the image and allocate an appropriate 2171682674b8SRichard Henderson amount of memory to handle that. */ 2172682674b8SRichard Henderson loaddr = -1, hiaddr = 0; 21738e62a717SRichard Henderson for (i = 0; i < ehdr->e_phnum; ++i) { 21748e62a717SRichard Henderson if (phdr[i].p_type == PT_LOAD) { 2175a93934feSJonas Maebe abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset; 2176682674b8SRichard Henderson if (a < loaddr) { 2177682674b8SRichard Henderson loaddr = a; 2178682674b8SRichard Henderson } 2179ccf661f8STom Musta a = phdr[i].p_vaddr + phdr[i].p_memsz; 2180682674b8SRichard Henderson if (a > hiaddr) { 2181682674b8SRichard Henderson hiaddr = a; 2182682674b8SRichard Henderson } 21831af02e83SMike Frysinger ++info->nsegs; 2184682674b8SRichard Henderson } 2185682674b8SRichard Henderson } 2186682674b8SRichard Henderson 2187682674b8SRichard Henderson load_addr = loaddr; 21888e62a717SRichard Henderson if (ehdr->e_type == ET_DYN) { 2189682674b8SRichard Henderson /* The image indicates that it can be loaded anywhere. Find a 2190682674b8SRichard Henderson location that can hold the memory space required. If the 2191682674b8SRichard Henderson image is pre-linked, LOADDR will be non-zero. Since we do 2192682674b8SRichard Henderson not supply MAP_FIXED here we'll use that address if and 2193682674b8SRichard Henderson only if it remains available. */ 2194682674b8SRichard Henderson load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE, 2195682674b8SRichard Henderson MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, 219609bfb054Sbellard -1, 0); 2197682674b8SRichard Henderson if (load_addr == -1) { 21988e62a717SRichard Henderson goto exit_perror; 219909bfb054Sbellard } 2200bf858897SRichard Henderson } else if (pinterp_name != NULL) { 2201bf858897SRichard Henderson /* This is the main executable. 
Make sure that the low 2202bf858897SRichard Henderson address does not conflict with MMAP_MIN_ADDR or the 2203bf858897SRichard Henderson QEMU application itself. */ 2204f3ed1f5dSPeter Maydell probe_guest_base(image_name, loaddr, hiaddr); 220509bfb054Sbellard } 2206682674b8SRichard Henderson load_bias = load_addr - loaddr; 220709bfb054Sbellard 2208a99856cdSChristophe Lyon if (elf_is_fdpic(ehdr)) { 22091af02e83SMike Frysinger struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 22107267c094SAnthony Liguori g_malloc(sizeof(*loadsegs) * info->nsegs); 22111af02e83SMike Frysinger 22121af02e83SMike Frysinger for (i = 0; i < ehdr->e_phnum; ++i) { 22131af02e83SMike Frysinger switch (phdr[i].p_type) { 22141af02e83SMike Frysinger case PT_DYNAMIC: 22151af02e83SMike Frysinger info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 22161af02e83SMike Frysinger break; 22171af02e83SMike Frysinger case PT_LOAD: 22181af02e83SMike Frysinger loadsegs->addr = phdr[i].p_vaddr + load_bias; 22191af02e83SMike Frysinger loadsegs->p_vaddr = phdr[i].p_vaddr; 22201af02e83SMike Frysinger loadsegs->p_memsz = phdr[i].p_memsz; 22211af02e83SMike Frysinger ++loadsegs; 22221af02e83SMike Frysinger break; 22231af02e83SMike Frysinger } 22241af02e83SMike Frysinger } 22251af02e83SMike Frysinger } 22261af02e83SMike Frysinger 22278e62a717SRichard Henderson info->load_bias = load_bias; 22288e62a717SRichard Henderson info->load_addr = load_addr; 22298e62a717SRichard Henderson info->entry = ehdr->e_entry + load_bias; 22308e62a717SRichard Henderson info->start_code = -1; 22318e62a717SRichard Henderson info->end_code = 0; 22328e62a717SRichard Henderson info->start_data = -1; 22338e62a717SRichard Henderson info->end_data = 0; 22348e62a717SRichard Henderson info->brk = 0; 2235d8fd2954SPaul Brook info->elf_flags = ehdr->e_flags; 22368e62a717SRichard Henderson 22378e62a717SRichard Henderson for (i = 0; i < ehdr->e_phnum; i++) { 22388e62a717SRichard Henderson struct elf_phdr *eppnt = phdr + i; 223931e31b8aSbellard if 
(eppnt->p_type == PT_LOAD) { 2240682674b8SRichard Henderson abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em; 224131e31b8aSbellard int elf_prot = 0; 224231e31b8aSbellard 224331e31b8aSbellard if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 224431e31b8aSbellard if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 224531e31b8aSbellard if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 224631e31b8aSbellard 2247682674b8SRichard Henderson vaddr = load_bias + eppnt->p_vaddr; 2248682674b8SRichard Henderson vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr); 2249682674b8SRichard Henderson vaddr_ps = TARGET_ELF_PAGESTART(vaddr); 2250682674b8SRichard Henderson 2251682674b8SRichard Henderson error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, 2252682674b8SRichard Henderson elf_prot, MAP_PRIVATE | MAP_FIXED, 22538e62a717SRichard Henderson image_fd, eppnt->p_offset - vaddr_po); 2254e89f07d3Spbrook if (error == -1) { 22558e62a717SRichard Henderson goto exit_perror; 225631e31b8aSbellard } 225731e31b8aSbellard 2258682674b8SRichard Henderson vaddr_ef = vaddr + eppnt->p_filesz; 2259682674b8SRichard Henderson vaddr_em = vaddr + eppnt->p_memsz; 226031e31b8aSbellard 2261cf129f3aSRichard Henderson /* If the load segment requests extra zeros (e.g. bss), map it. */ 2262682674b8SRichard Henderson if (vaddr_ef < vaddr_em) { 2263682674b8SRichard Henderson zero_bss(vaddr_ef, vaddr_em, elf_prot); 2264682674b8SRichard Henderson } 22658e62a717SRichard Henderson 22668e62a717SRichard Henderson /* Find the full program boundaries. 
*/ 22678e62a717SRichard Henderson if (elf_prot & PROT_EXEC) { 22688e62a717SRichard Henderson if (vaddr < info->start_code) { 22698e62a717SRichard Henderson info->start_code = vaddr; 2270cf129f3aSRichard Henderson } 22718e62a717SRichard Henderson if (vaddr_ef > info->end_code) { 22728e62a717SRichard Henderson info->end_code = vaddr_ef; 22738e62a717SRichard Henderson } 22748e62a717SRichard Henderson } 22758e62a717SRichard Henderson if (elf_prot & PROT_WRITE) { 22768e62a717SRichard Henderson if (vaddr < info->start_data) { 22778e62a717SRichard Henderson info->start_data = vaddr; 22788e62a717SRichard Henderson } 22798e62a717SRichard Henderson if (vaddr_ef > info->end_data) { 22808e62a717SRichard Henderson info->end_data = vaddr_ef; 22818e62a717SRichard Henderson } 22828e62a717SRichard Henderson if (vaddr_em > info->brk) { 22838e62a717SRichard Henderson info->brk = vaddr_em; 22848e62a717SRichard Henderson } 22858e62a717SRichard Henderson } 2286bf858897SRichard Henderson } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 2287bf858897SRichard Henderson char *interp_name; 2288bf858897SRichard Henderson 2289bf858897SRichard Henderson if (*pinterp_name) { 2290bf858897SRichard Henderson errmsg = "Multiple PT_INTERP entries"; 2291bf858897SRichard Henderson goto exit_errmsg; 2292bf858897SRichard Henderson } 2293bf858897SRichard Henderson interp_name = malloc(eppnt->p_filesz); 2294bf858897SRichard Henderson if (!interp_name) { 2295bf858897SRichard Henderson goto exit_perror; 2296bf858897SRichard Henderson } 2297bf858897SRichard Henderson 2298bf858897SRichard Henderson if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { 2299bf858897SRichard Henderson memcpy(interp_name, bprm_buf + eppnt->p_offset, 2300bf858897SRichard Henderson eppnt->p_filesz); 2301bf858897SRichard Henderson } else { 2302bf858897SRichard Henderson retval = pread(image_fd, interp_name, eppnt->p_filesz, 2303bf858897SRichard Henderson eppnt->p_offset); 2304bf858897SRichard Henderson if (retval != 
eppnt->p_filesz) { 2305bf858897SRichard Henderson goto exit_perror; 2306bf858897SRichard Henderson } 2307bf858897SRichard Henderson } 2308bf858897SRichard Henderson if (interp_name[eppnt->p_filesz - 1] != 0) { 2309bf858897SRichard Henderson errmsg = "Invalid PT_INTERP entry"; 2310bf858897SRichard Henderson goto exit_errmsg; 2311bf858897SRichard Henderson } 2312bf858897SRichard Henderson *pinterp_name = interp_name; 23138e62a717SRichard Henderson } 23148e62a717SRichard Henderson } 23158e62a717SRichard Henderson 23168e62a717SRichard Henderson if (info->end_data == 0) { 23178e62a717SRichard Henderson info->start_data = info->end_code; 23188e62a717SRichard Henderson info->end_data = info->end_code; 23198e62a717SRichard Henderson info->brk = info->end_code; 232031e31b8aSbellard } 232131e31b8aSbellard 2322682674b8SRichard Henderson if (qemu_log_enabled()) { 23238e62a717SRichard Henderson load_symbols(ehdr, image_fd, load_bias); 2324682674b8SRichard Henderson } 232531e31b8aSbellard 232698c1076cSAlex Bennée mmap_unlock(); 232798c1076cSAlex Bennée 23288e62a717SRichard Henderson close(image_fd); 23298e62a717SRichard Henderson return; 233031e31b8aSbellard 23318e62a717SRichard Henderson exit_read: 23328e62a717SRichard Henderson if (retval >= 0) { 23338e62a717SRichard Henderson errmsg = "Incomplete read of file header"; 23348e62a717SRichard Henderson goto exit_errmsg; 23358e62a717SRichard Henderson } 23368e62a717SRichard Henderson exit_perror: 23378e62a717SRichard Henderson errmsg = strerror(errno); 23388e62a717SRichard Henderson exit_errmsg: 23398e62a717SRichard Henderson fprintf(stderr, "%s: %s\n", image_name, errmsg); 23408e62a717SRichard Henderson exit(-1); 23418e62a717SRichard Henderson } 23428e62a717SRichard Henderson 23438e62a717SRichard Henderson static void load_elf_interp(const char *filename, struct image_info *info, 23448e62a717SRichard Henderson char bprm_buf[BPRM_BUF_SIZE]) 23458e62a717SRichard Henderson { 23468e62a717SRichard Henderson int fd, retval; 
23478e62a717SRichard Henderson 23488e62a717SRichard Henderson fd = open(path(filename), O_RDONLY); 23498e62a717SRichard Henderson if (fd < 0) { 23508e62a717SRichard Henderson goto exit_perror; 23518e62a717SRichard Henderson } 23528e62a717SRichard Henderson 23538e62a717SRichard Henderson retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 23548e62a717SRichard Henderson if (retval < 0) { 23558e62a717SRichard Henderson goto exit_perror; 23568e62a717SRichard Henderson } 23578e62a717SRichard Henderson if (retval < BPRM_BUF_SIZE) { 23588e62a717SRichard Henderson memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval); 23598e62a717SRichard Henderson } 23608e62a717SRichard Henderson 2361bf858897SRichard Henderson load_elf_image(filename, fd, info, NULL, bprm_buf); 23628e62a717SRichard Henderson return; 23638e62a717SRichard Henderson 23648e62a717SRichard Henderson exit_perror: 23658e62a717SRichard Henderson fprintf(stderr, "%s: %s\n", filename, strerror(errno)); 23668e62a717SRichard Henderson exit(-1); 236731e31b8aSbellard } 236831e31b8aSbellard 236949918a75Spbrook static int symfind(const void *s0, const void *s1) 237049918a75Spbrook { 2371c7c530cdSStefan Weil target_ulong addr = *(target_ulong *)s0; 237249918a75Spbrook struct elf_sym *sym = (struct elf_sym *)s1; 237349918a75Spbrook int result = 0; 2374c7c530cdSStefan Weil if (addr < sym->st_value) { 237549918a75Spbrook result = -1; 2376c7c530cdSStefan Weil } else if (addr >= sym->st_value + sym->st_size) { 237749918a75Spbrook result = 1; 237849918a75Spbrook } 237949918a75Spbrook return result; 238049918a75Spbrook } 238149918a75Spbrook 238249918a75Spbrook static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr) 238349918a75Spbrook { 238449918a75Spbrook #if ELF_CLASS == ELFCLASS32 238549918a75Spbrook struct elf_sym *syms = s->disas_symtab.elf32; 238649918a75Spbrook #else 238749918a75Spbrook struct elf_sym *syms = s->disas_symtab.elf64; 238849918a75Spbrook #endif 238949918a75Spbrook 239049918a75Spbrook // binary 
search 239149918a75Spbrook struct elf_sym *sym; 239249918a75Spbrook 2393c7c530cdSStefan Weil sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 23947cba04f6SBlue Swirl if (sym != NULL) { 239549918a75Spbrook return s->disas_strtab + sym->st_name; 239649918a75Spbrook } 239749918a75Spbrook 239849918a75Spbrook return ""; 239949918a75Spbrook } 240049918a75Spbrook 240149918a75Spbrook /* FIXME: This should use elf_ops.h */ 240249918a75Spbrook static int symcmp(const void *s0, const void *s1) 240349918a75Spbrook { 240449918a75Spbrook struct elf_sym *sym0 = (struct elf_sym *)s0; 240549918a75Spbrook struct elf_sym *sym1 = (struct elf_sym *)s1; 240649918a75Spbrook return (sym0->st_value < sym1->st_value) 240749918a75Spbrook ? -1 240849918a75Spbrook : ((sym0->st_value > sym1->st_value) ? 1 : 0); 240949918a75Spbrook } 241049918a75Spbrook 2411689f936fSbellard /* Best attempt to load symbols from this ELF object. */ 2412682674b8SRichard Henderson static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) 2413689f936fSbellard { 2414682674b8SRichard Henderson int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 24151e06262dSPeter Maydell uint64_t segsz; 2416682674b8SRichard Henderson struct elf_shdr *shdr; 2417b9475279SCédric VINCENT char *strings = NULL; 2418b9475279SCédric VINCENT struct syminfo *s = NULL; 2419b9475279SCédric VINCENT struct elf_sym *new_syms, *syms = NULL; 242031e31b8aSbellard 2421682674b8SRichard Henderson shnum = hdr->e_shnum; 2422682674b8SRichard Henderson i = shnum * sizeof(struct elf_shdr); 2423682674b8SRichard Henderson shdr = (struct elf_shdr *)alloca(i); 2424682674b8SRichard Henderson if (pread(fd, shdr, i, hdr->e_shoff) != i) { 2425689f936fSbellard return; 2426682674b8SRichard Henderson } 2427682674b8SRichard Henderson 2428682674b8SRichard Henderson bswap_shdr(shdr, shnum); 2429682674b8SRichard Henderson for (i = 0; i < shnum; ++i) { 2430682674b8SRichard Henderson if (shdr[i].sh_type == SHT_SYMTAB) { 2431682674b8SRichard 
Henderson sym_idx = i; 2432682674b8SRichard Henderson str_idx = shdr[i].sh_link; 2433689f936fSbellard goto found; 2434689f936fSbellard } 2435689f936fSbellard } 2436682674b8SRichard Henderson 2437682674b8SRichard Henderson /* There will be no symbol table if the file was stripped. */ 2438682674b8SRichard Henderson return; 2439689f936fSbellard 2440689f936fSbellard found: 2441689f936fSbellard /* Now know where the strtab and symtab are. Snarf them. */ 24420ef9ea29SPeter Maydell s = g_try_new(struct syminfo, 1); 2443682674b8SRichard Henderson if (!s) { 2444b9475279SCédric VINCENT goto give_up; 2445682674b8SRichard Henderson } 2446682674b8SRichard Henderson 24471e06262dSPeter Maydell segsz = shdr[str_idx].sh_size; 24481e06262dSPeter Maydell s->disas_strtab = strings = g_try_malloc(segsz); 24491e06262dSPeter Maydell if (!strings || 24501e06262dSPeter Maydell pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) { 2451b9475279SCédric VINCENT goto give_up; 2452682674b8SRichard Henderson } 2453689f936fSbellard 24541e06262dSPeter Maydell segsz = shdr[sym_idx].sh_size; 24551e06262dSPeter Maydell syms = g_try_malloc(segsz); 24561e06262dSPeter Maydell if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) { 2457b9475279SCédric VINCENT goto give_up; 2458682674b8SRichard Henderson } 2459689f936fSbellard 24601e06262dSPeter Maydell if (segsz / sizeof(struct elf_sym) > INT_MAX) { 24611e06262dSPeter Maydell /* Implausibly large symbol table: give up rather than ploughing 24621e06262dSPeter Maydell * on with the number of symbols calculation overflowing 24631e06262dSPeter Maydell */ 24641e06262dSPeter Maydell goto give_up; 24651e06262dSPeter Maydell } 24661e06262dSPeter Maydell nsyms = segsz / sizeof(struct elf_sym); 2467682674b8SRichard Henderson for (i = 0; i < nsyms; ) { 246849918a75Spbrook bswap_sym(syms + i); 2469682674b8SRichard Henderson /* Throw away entries which we do not need. 
*/ 2470682674b8SRichard Henderson if (syms[i].st_shndx == SHN_UNDEF 2471682674b8SRichard Henderson || syms[i].st_shndx >= SHN_LORESERVE 2472682674b8SRichard Henderson || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 2473682674b8SRichard Henderson if (i < --nsyms) { 247449918a75Spbrook syms[i] = syms[nsyms]; 247549918a75Spbrook } 2476682674b8SRichard Henderson } else { 247749918a75Spbrook #if defined(TARGET_ARM) || defined (TARGET_MIPS) 247849918a75Spbrook /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 247949918a75Spbrook syms[i].st_value &= ~(target_ulong)1; 248049918a75Spbrook #endif 2481682674b8SRichard Henderson syms[i].st_value += load_bias; 248249918a75Spbrook i++; 248349918a75Spbrook } 2484682674b8SRichard Henderson } 248549918a75Spbrook 2486b9475279SCédric VINCENT /* No "useful" symbol. */ 2487b9475279SCédric VINCENT if (nsyms == 0) { 2488b9475279SCédric VINCENT goto give_up; 2489b9475279SCédric VINCENT } 2490b9475279SCédric VINCENT 24915d5c9930SRichard Henderson /* Attempt to free the storage associated with the local symbols 24925d5c9930SRichard Henderson that we threw away. Whether or not this has any effect on the 24935d5c9930SRichard Henderson memory allocation depends on the malloc implementation and how 24945d5c9930SRichard Henderson many symbols we managed to discard. 
*/ 24950ef9ea29SPeter Maydell new_syms = g_try_renew(struct elf_sym, syms, nsyms); 24968d79de6eSStefan Weil if (new_syms == NULL) { 2497b9475279SCédric VINCENT goto give_up; 24985d5c9930SRichard Henderson } 24998d79de6eSStefan Weil syms = new_syms; 25005d5c9930SRichard Henderson 250149918a75Spbrook qsort(syms, nsyms, sizeof(*syms), symcmp); 250249918a75Spbrook 250349918a75Spbrook s->disas_num_syms = nsyms; 250449918a75Spbrook #if ELF_CLASS == ELFCLASS32 250549918a75Spbrook s->disas_symtab.elf32 = syms; 250649918a75Spbrook #else 250749918a75Spbrook s->disas_symtab.elf64 = syms; 250849918a75Spbrook #endif 2509682674b8SRichard Henderson s->lookup_symbol = lookup_symbolxx; 2510e80cfcfcSbellard s->next = syminfos; 2511e80cfcfcSbellard syminfos = s; 2512b9475279SCédric VINCENT 2513b9475279SCédric VINCENT return; 2514b9475279SCédric VINCENT 2515b9475279SCédric VINCENT give_up: 25160ef9ea29SPeter Maydell g_free(s); 25170ef9ea29SPeter Maydell g_free(strings); 25180ef9ea29SPeter Maydell g_free(syms); 2519689f936fSbellard } 252031e31b8aSbellard 2521768fe76eSYunQiang Su uint32_t get_elf_eflags(int fd) 2522768fe76eSYunQiang Su { 2523768fe76eSYunQiang Su struct elfhdr ehdr; 2524768fe76eSYunQiang Su off_t offset; 2525768fe76eSYunQiang Su int ret; 2526768fe76eSYunQiang Su 2527768fe76eSYunQiang Su /* Read ELF header */ 2528768fe76eSYunQiang Su offset = lseek(fd, 0, SEEK_SET); 2529768fe76eSYunQiang Su if (offset == (off_t) -1) { 2530768fe76eSYunQiang Su return 0; 2531768fe76eSYunQiang Su } 2532768fe76eSYunQiang Su ret = read(fd, &ehdr, sizeof(ehdr)); 2533768fe76eSYunQiang Su if (ret < sizeof(ehdr)) { 2534768fe76eSYunQiang Su return 0; 2535768fe76eSYunQiang Su } 2536768fe76eSYunQiang Su offset = lseek(fd, offset, SEEK_SET); 2537768fe76eSYunQiang Su if (offset == (off_t) -1) { 2538768fe76eSYunQiang Su return 0; 2539768fe76eSYunQiang Su } 2540768fe76eSYunQiang Su 2541768fe76eSYunQiang Su /* Check ELF signature */ 2542768fe76eSYunQiang Su if (!elf_check_ident(&ehdr)) { 
2543768fe76eSYunQiang Su return 0; 2544768fe76eSYunQiang Su } 2545768fe76eSYunQiang Su 2546768fe76eSYunQiang Su /* check header */ 2547768fe76eSYunQiang Su bswap_ehdr(&ehdr); 2548768fe76eSYunQiang Su if (!elf_check_ehdr(&ehdr)) { 2549768fe76eSYunQiang Su return 0; 2550768fe76eSYunQiang Su } 2551768fe76eSYunQiang Su 2552768fe76eSYunQiang Su /* return architecture id */ 2553768fe76eSYunQiang Su return ehdr.e_flags; 2554768fe76eSYunQiang Su } 2555768fe76eSYunQiang Su 2556f0116c54SWill Newton int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 255731e31b8aSbellard { 25588e62a717SRichard Henderson struct image_info interp_info; 255931e31b8aSbellard struct elfhdr elf_ex; 25608e62a717SRichard Henderson char *elf_interpreter = NULL; 256159baae9aSStefan Brüns char *scratch; 256231e31b8aSbellard 2563bf858897SRichard Henderson info->start_mmap = (abi_ulong)ELF_START_MMAP; 256431e31b8aSbellard 2565bf858897SRichard Henderson load_elf_image(bprm->filename, bprm->fd, info, 2566bf858897SRichard Henderson &elf_interpreter, bprm->buf); 2567bf858897SRichard Henderson 2568bf858897SRichard Henderson /* ??? We need a copy of the elf header for passing to create_elf_tables. 2569bf858897SRichard Henderson If we do nothing, we'll have overwritten this when we re-use bprm->buf 2570bf858897SRichard Henderson when we load the interpreter. */ 2571bf858897SRichard Henderson elf_ex = *(struct elfhdr *)bprm->buf; 257231e31b8aSbellard 257359baae9aSStefan Brüns /* Do this so that we can load the interpreter, if need be. 
We will 257459baae9aSStefan Brüns change some of these later */ 257559baae9aSStefan Brüns bprm->p = setup_arg_pages(bprm, info); 257659baae9aSStefan Brüns 257759baae9aSStefan Brüns scratch = g_new0(char, TARGET_PAGE_SIZE); 25787c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 257959baae9aSStefan Brüns bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 258059baae9aSStefan Brüns bprm->p, info->stack_limit); 25817c4ee5bcSRichard Henderson info->file_string = bprm->p; 258259baae9aSStefan Brüns bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 258359baae9aSStefan Brüns bprm->p, info->stack_limit); 25847c4ee5bcSRichard Henderson info->env_strings = bprm->p; 258559baae9aSStefan Brüns bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 258659baae9aSStefan Brüns bprm->p, info->stack_limit); 25877c4ee5bcSRichard Henderson info->arg_strings = bprm->p; 25887c4ee5bcSRichard Henderson } else { 25897c4ee5bcSRichard Henderson info->arg_strings = bprm->p; 25907c4ee5bcSRichard Henderson bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 25917c4ee5bcSRichard Henderson bprm->p, info->stack_limit); 25927c4ee5bcSRichard Henderson info->env_strings = bprm->p; 25937c4ee5bcSRichard Henderson bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 25947c4ee5bcSRichard Henderson bprm->p, info->stack_limit); 25957c4ee5bcSRichard Henderson info->file_string = bprm->p; 25967c4ee5bcSRichard Henderson bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 25977c4ee5bcSRichard Henderson bprm->p, info->stack_limit); 25987c4ee5bcSRichard Henderson } 25997c4ee5bcSRichard Henderson 260059baae9aSStefan Brüns g_free(scratch); 260159baae9aSStefan Brüns 2602e5fe0c52Spbrook if (!bprm->p) { 2603bf858897SRichard Henderson fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG)); 260431e31b8aSbellard exit(-1); 26059955ffacSRichard Henderson } 2606379f6698SPaul Brook 26078e62a717SRichard Henderson if (elf_interpreter) { 26088e62a717SRichard Henderson 
load_elf_interp(elf_interpreter, &interp_info, bprm->buf); 260931e31b8aSbellard 26108e62a717SRichard Henderson /* If the program interpreter is one of these two, then assume 26118e62a717SRichard Henderson an iBCS2 image. Otherwise assume a native linux image. */ 261231e31b8aSbellard 26138e62a717SRichard Henderson if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 26148e62a717SRichard Henderson || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) { 26158e62a717SRichard Henderson info->personality = PER_SVR4; 26168e62a717SRichard Henderson 261731e31b8aSbellard /* Why this, you ask??? Well SVr4 maps page 0 as read-only, 26188e62a717SRichard Henderson and some applications "depend" upon this behavior. Since 26198e62a717SRichard Henderson we do not have the power to recompile these, we emulate 26208e62a717SRichard Henderson the SVr4 behavior. Sigh. */ 26218e62a717SRichard Henderson target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC, 262268754b44SPeter Maydell MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 262331e31b8aSbellard } 26248e62a717SRichard Henderson } 262531e31b8aSbellard 26268e62a717SRichard Henderson bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex, 26278e62a717SRichard Henderson info, (elf_interpreter ? &interp_info : NULL)); 26288e62a717SRichard Henderson info->start_stack = bprm->p; 26298e62a717SRichard Henderson 26308e62a717SRichard Henderson /* If we have an interpreter, set that as the program's entry point. 26318e78064eSRichard Henderson Copy the load_bias as well, to help PPC64 interpret the entry 26328e62a717SRichard Henderson point as a function descriptor. Do this after creating elf tables 26338e62a717SRichard Henderson so that we copy the original program entry point into the AUXV. 
*/ 26348e62a717SRichard Henderson if (elf_interpreter) { 26358e78064eSRichard Henderson info->load_bias = interp_info.load_bias; 26368e62a717SRichard Henderson info->entry = interp_info.entry; 2637bf858897SRichard Henderson free(elf_interpreter); 26388e62a717SRichard Henderson } 263931e31b8aSbellard 2640edf8e2afSMika Westerberg #ifdef USE_ELF_CORE_DUMP 2641edf8e2afSMika Westerberg bprm->core_dump = &elf_core_dump; 2642edf8e2afSMika Westerberg #endif 2643edf8e2afSMika Westerberg 264431e31b8aSbellard return 0; 264531e31b8aSbellard } 264631e31b8aSbellard 2647edf8e2afSMika Westerberg #ifdef USE_ELF_CORE_DUMP 2648edf8e2afSMika Westerberg /* 2649edf8e2afSMika Westerberg * Definitions to generate Intel SVR4-like core files. 2650a2547a13SLaurent Desnogues * These mostly have the same names as the SVR4 types with "target_elf_" 2651edf8e2afSMika Westerberg * tacked on the front to prevent clashes with linux definitions, 2652edf8e2afSMika Westerberg * and the typedef forms have been avoided. This is mostly like 2653edf8e2afSMika Westerberg * the SVR4 structure, but more Linuxy, with things that Linux does 2654edf8e2afSMika Westerberg * not support and which gdb doesn't really use excluded. 2655edf8e2afSMika Westerberg * 2656edf8e2afSMika Westerberg * Fields we don't dump (their contents is zero) in linux-user qemu 2657edf8e2afSMika Westerberg * are marked with XXX. 2658edf8e2afSMika Westerberg * 2659edf8e2afSMika Westerberg * Core dump code is copied from linux kernel (fs/binfmt_elf.c). 2660edf8e2afSMika Westerberg * 2661edf8e2afSMika Westerberg * Porting ELF coredump for target is (quite) simple process. First you 2662dd0a3651SNathan Froyd * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for 2663edf8e2afSMika Westerberg * the target resides): 2664edf8e2afSMika Westerberg * 2665edf8e2afSMika Westerberg * #define USE_ELF_CORE_DUMP 2666edf8e2afSMika Westerberg * 2667edf8e2afSMika Westerberg * Next you define type of register set used for dumping. 
ELF specification 2668edf8e2afSMika Westerberg * says that it needs to be array of elf_greg_t that has size of ELF_NREG. 2669edf8e2afSMika Westerberg * 2670c227f099SAnthony Liguori * typedef <target_regtype> target_elf_greg_t; 2671edf8e2afSMika Westerberg * #define ELF_NREG <number of registers> 2672c227f099SAnthony Liguori * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG]; 2673edf8e2afSMika Westerberg * 2674edf8e2afSMika Westerberg * Last step is to implement target specific function that copies registers 2675edf8e2afSMika Westerberg * from given cpu into just specified register set. Prototype is: 2676edf8e2afSMika Westerberg * 2677c227f099SAnthony Liguori * static void elf_core_copy_regs(taret_elf_gregset_t *regs, 26789349b4f9SAndreas Färber * const CPUArchState *env); 2679edf8e2afSMika Westerberg * 2680edf8e2afSMika Westerberg * Parameters: 2681edf8e2afSMika Westerberg * regs - copy register values into here (allocated and zeroed by caller) 2682edf8e2afSMika Westerberg * env - copy registers from here 2683edf8e2afSMika Westerberg * 2684edf8e2afSMika Westerberg * Example for ARM target is provided in this file. 
2685edf8e2afSMika Westerberg */ 2686edf8e2afSMika Westerberg 2687edf8e2afSMika Westerberg /* An ELF note in memory */ 2688edf8e2afSMika Westerberg struct memelfnote { 2689edf8e2afSMika Westerberg const char *name; 2690edf8e2afSMika Westerberg size_t namesz; 2691edf8e2afSMika Westerberg size_t namesz_rounded; 2692edf8e2afSMika Westerberg int type; 2693edf8e2afSMika Westerberg size_t datasz; 269480f5ce75SLaurent Vivier size_t datasz_rounded; 2695edf8e2afSMika Westerberg void *data; 2696edf8e2afSMika Westerberg size_t notesz; 2697edf8e2afSMika Westerberg }; 2698edf8e2afSMika Westerberg 2699a2547a13SLaurent Desnogues struct target_elf_siginfo { 2700f8fd4fc4SPaolo Bonzini abi_int si_signo; /* signal number */ 2701f8fd4fc4SPaolo Bonzini abi_int si_code; /* extra code */ 2702f8fd4fc4SPaolo Bonzini abi_int si_errno; /* errno */ 2703edf8e2afSMika Westerberg }; 2704edf8e2afSMika Westerberg 2705a2547a13SLaurent Desnogues struct target_elf_prstatus { 2706a2547a13SLaurent Desnogues struct target_elf_siginfo pr_info; /* Info associated with signal */ 27071ddd592fSPaolo Bonzini abi_short pr_cursig; /* Current signal */ 2708ca98ac83SPaolo Bonzini abi_ulong pr_sigpend; /* XXX */ 2709ca98ac83SPaolo Bonzini abi_ulong pr_sighold; /* XXX */ 2710c227f099SAnthony Liguori target_pid_t pr_pid; 2711c227f099SAnthony Liguori target_pid_t pr_ppid; 2712c227f099SAnthony Liguori target_pid_t pr_pgrp; 2713c227f099SAnthony Liguori target_pid_t pr_sid; 2714edf8e2afSMika Westerberg struct target_timeval pr_utime; /* XXX User time */ 2715edf8e2afSMika Westerberg struct target_timeval pr_stime; /* XXX System time */ 2716edf8e2afSMika Westerberg struct target_timeval pr_cutime; /* XXX Cumulative user time */ 2717edf8e2afSMika Westerberg struct target_timeval pr_cstime; /* XXX Cumulative system time */ 2718c227f099SAnthony Liguori target_elf_gregset_t pr_reg; /* GP registers */ 2719f8fd4fc4SPaolo Bonzini abi_int pr_fpvalid; /* XXX */ 2720edf8e2afSMika Westerberg }; 2721edf8e2afSMika Westerberg 
2722edf8e2afSMika Westerberg #define ELF_PRARGSZ (80) /* Number of chars for args */ 2723edf8e2afSMika Westerberg 2724a2547a13SLaurent Desnogues struct target_elf_prpsinfo { 2725edf8e2afSMika Westerberg char pr_state; /* numeric process state */ 2726edf8e2afSMika Westerberg char pr_sname; /* char for pr_state */ 2727edf8e2afSMika Westerberg char pr_zomb; /* zombie */ 2728edf8e2afSMika Westerberg char pr_nice; /* nice val */ 2729ca98ac83SPaolo Bonzini abi_ulong pr_flag; /* flags */ 2730c227f099SAnthony Liguori target_uid_t pr_uid; 2731c227f099SAnthony Liguori target_gid_t pr_gid; 2732c227f099SAnthony Liguori target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; 2733edf8e2afSMika Westerberg /* Lots missing */ 2734edf8e2afSMika Westerberg char pr_fname[16]; /* filename of executable */ 2735edf8e2afSMika Westerberg char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ 2736edf8e2afSMika Westerberg }; 2737edf8e2afSMika Westerberg 2738edf8e2afSMika Westerberg /* Here is the structure in which status of each thread is captured. 
*/ 2739edf8e2afSMika Westerberg struct elf_thread_status { 274072cf2d4fSBlue Swirl QTAILQ_ENTRY(elf_thread_status) ets_link; 2741a2547a13SLaurent Desnogues struct target_elf_prstatus prstatus; /* NT_PRSTATUS */ 2742edf8e2afSMika Westerberg #if 0 2743edf8e2afSMika Westerberg elf_fpregset_t fpu; /* NT_PRFPREG */ 2744edf8e2afSMika Westerberg struct task_struct *thread; 2745edf8e2afSMika Westerberg elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ 2746edf8e2afSMika Westerberg #endif 2747edf8e2afSMika Westerberg struct memelfnote notes[1]; 2748edf8e2afSMika Westerberg int num_notes; 2749edf8e2afSMika Westerberg }; 2750edf8e2afSMika Westerberg 2751edf8e2afSMika Westerberg struct elf_note_info { 2752edf8e2afSMika Westerberg struct memelfnote *notes; 2753a2547a13SLaurent Desnogues struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */ 2754a2547a13SLaurent Desnogues struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */ 2755edf8e2afSMika Westerberg 275672cf2d4fSBlue Swirl QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list; 2757edf8e2afSMika Westerberg #if 0 2758edf8e2afSMika Westerberg /* 2759edf8e2afSMika Westerberg * Current version of ELF coredump doesn't support 2760edf8e2afSMika Westerberg * dumping fp regs etc. 2761edf8e2afSMika Westerberg */ 2762edf8e2afSMika Westerberg elf_fpregset_t *fpu; 2763edf8e2afSMika Westerberg elf_fpxregset_t *xfpu; 2764edf8e2afSMika Westerberg int thread_status_size; 2765edf8e2afSMika Westerberg #endif 2766edf8e2afSMika Westerberg int notes_size; 2767edf8e2afSMika Westerberg int numnote; 2768edf8e2afSMika Westerberg }; 2769edf8e2afSMika Westerberg 2770edf8e2afSMika Westerberg struct vm_area_struct { 27711a1c4db9SMikhail Ilyin target_ulong vma_start; /* start vaddr of memory region */ 27721a1c4db9SMikhail Ilyin target_ulong vma_end; /* end vaddr of memory region */ 2773edf8e2afSMika Westerberg abi_ulong vma_flags; /* protection etc. 
flags for the region */ 277472cf2d4fSBlue Swirl QTAILQ_ENTRY(vm_area_struct) vma_link; 2775edf8e2afSMika Westerberg }; 2776edf8e2afSMika Westerberg 2777edf8e2afSMika Westerberg struct mm_struct { 277872cf2d4fSBlue Swirl QTAILQ_HEAD(, vm_area_struct) mm_mmap; 2779edf8e2afSMika Westerberg int mm_count; /* number of mappings */ 2780edf8e2afSMika Westerberg }; 2781edf8e2afSMika Westerberg 2782edf8e2afSMika Westerberg static struct mm_struct *vma_init(void); 2783edf8e2afSMika Westerberg static void vma_delete(struct mm_struct *); 27841a1c4db9SMikhail Ilyin static int vma_add_mapping(struct mm_struct *, target_ulong, 27851a1c4db9SMikhail Ilyin target_ulong, abi_ulong); 2786edf8e2afSMika Westerberg static int vma_get_mapping_count(const struct mm_struct *); 2787edf8e2afSMika Westerberg static struct vm_area_struct *vma_first(const struct mm_struct *); 2788edf8e2afSMika Westerberg static struct vm_area_struct *vma_next(struct vm_area_struct *); 2789edf8e2afSMika Westerberg static abi_ulong vma_dump_size(const struct vm_area_struct *); 27901a1c4db9SMikhail Ilyin static int vma_walker(void *priv, target_ulong start, target_ulong end, 2791edf8e2afSMika Westerberg unsigned long flags); 2792edf8e2afSMika Westerberg 2793edf8e2afSMika Westerberg static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t); 2794edf8e2afSMika Westerberg static void fill_note(struct memelfnote *, const char *, int, 2795edf8e2afSMika Westerberg unsigned int, void *); 2796a2547a13SLaurent Desnogues static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int); 2797a2547a13SLaurent Desnogues static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *); 2798edf8e2afSMika Westerberg static void fill_auxv_note(struct memelfnote *, const TaskState *); 2799edf8e2afSMika Westerberg static void fill_elf_note_phdr(struct elf_phdr *, int, off_t); 2800edf8e2afSMika Westerberg static size_t note_size(const struct memelfnote *); 2801edf8e2afSMika Westerberg static void 
free_note_info(struct elf_note_info *); 28029349b4f9SAndreas Färber static int fill_note_info(struct elf_note_info *, long, const CPUArchState *); 28039349b4f9SAndreas Färber static void fill_thread_info(struct elf_note_info *, const CPUArchState *); 2804edf8e2afSMika Westerberg static int core_dump_filename(const TaskState *, char *, size_t); 2805edf8e2afSMika Westerberg 2806edf8e2afSMika Westerberg static int dump_write(int, const void *, size_t); 2807edf8e2afSMika Westerberg static int write_note(struct memelfnote *, int); 2808edf8e2afSMika Westerberg static int write_note_info(struct elf_note_info *, int); 2809edf8e2afSMika Westerberg 2810edf8e2afSMika Westerberg #ifdef BSWAP_NEEDED 2811a2547a13SLaurent Desnogues static void bswap_prstatus(struct target_elf_prstatus *prstatus) 2812edf8e2afSMika Westerberg { 2813ca98ac83SPaolo Bonzini prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo); 2814ca98ac83SPaolo Bonzini prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code); 2815ca98ac83SPaolo Bonzini prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno); 2816edf8e2afSMika Westerberg prstatus->pr_cursig = tswap16(prstatus->pr_cursig); 2817ca98ac83SPaolo Bonzini prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend); 2818ca98ac83SPaolo Bonzini prstatus->pr_sighold = tswapal(prstatus->pr_sighold); 2819edf8e2afSMika Westerberg prstatus->pr_pid = tswap32(prstatus->pr_pid); 2820edf8e2afSMika Westerberg prstatus->pr_ppid = tswap32(prstatus->pr_ppid); 2821edf8e2afSMika Westerberg prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp); 2822edf8e2afSMika Westerberg prstatus->pr_sid = tswap32(prstatus->pr_sid); 2823edf8e2afSMika Westerberg /* cpu times are not filled, so we skip them */ 2824edf8e2afSMika Westerberg /* regs should be in correct format already */ 2825edf8e2afSMika Westerberg prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid); 2826edf8e2afSMika Westerberg } 2827edf8e2afSMika Westerberg 2828a2547a13SLaurent Desnogues static void 
bswap_psinfo(struct target_elf_prpsinfo *psinfo) 2829edf8e2afSMika Westerberg { 2830ca98ac83SPaolo Bonzini psinfo->pr_flag = tswapal(psinfo->pr_flag); 2831edf8e2afSMika Westerberg psinfo->pr_uid = tswap16(psinfo->pr_uid); 2832edf8e2afSMika Westerberg psinfo->pr_gid = tswap16(psinfo->pr_gid); 2833edf8e2afSMika Westerberg psinfo->pr_pid = tswap32(psinfo->pr_pid); 2834edf8e2afSMika Westerberg psinfo->pr_ppid = tswap32(psinfo->pr_ppid); 2835edf8e2afSMika Westerberg psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp); 2836edf8e2afSMika Westerberg psinfo->pr_sid = tswap32(psinfo->pr_sid); 2837edf8e2afSMika Westerberg } 2838991f8f0cSRichard Henderson 2839991f8f0cSRichard Henderson static void bswap_note(struct elf_note *en) 2840991f8f0cSRichard Henderson { 2841991f8f0cSRichard Henderson bswap32s(&en->n_namesz); 2842991f8f0cSRichard Henderson bswap32s(&en->n_descsz); 2843991f8f0cSRichard Henderson bswap32s(&en->n_type); 2844991f8f0cSRichard Henderson } 2845991f8f0cSRichard Henderson #else 2846991f8f0cSRichard Henderson static inline void bswap_prstatus(struct target_elf_prstatus *p) { } 2847991f8f0cSRichard Henderson static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} 2848991f8f0cSRichard Henderson static inline void bswap_note(struct elf_note *en) { } 2849edf8e2afSMika Westerberg #endif /* BSWAP_NEEDED */ 2850edf8e2afSMika Westerberg 2851edf8e2afSMika Westerberg /* 2852edf8e2afSMika Westerberg * Minimal support for linux memory regions. These are needed 2853edf8e2afSMika Westerberg * when we are finding out what memory exactly belongs to 2854edf8e2afSMika Westerberg * emulated process. No locks needed here, as long as 2855edf8e2afSMika Westerberg * thread that received the signal is stopped. 
2856edf8e2afSMika Westerberg */ 2857edf8e2afSMika Westerberg 2858edf8e2afSMika Westerberg static struct mm_struct *vma_init(void) 2859edf8e2afSMika Westerberg { 2860edf8e2afSMika Westerberg struct mm_struct *mm; 2861edf8e2afSMika Westerberg 28627267c094SAnthony Liguori if ((mm = g_malloc(sizeof (*mm))) == NULL) 2863edf8e2afSMika Westerberg return (NULL); 2864edf8e2afSMika Westerberg 2865edf8e2afSMika Westerberg mm->mm_count = 0; 286672cf2d4fSBlue Swirl QTAILQ_INIT(&mm->mm_mmap); 2867edf8e2afSMika Westerberg 2868edf8e2afSMika Westerberg return (mm); 2869edf8e2afSMika Westerberg } 2870edf8e2afSMika Westerberg 2871edf8e2afSMika Westerberg static void vma_delete(struct mm_struct *mm) 2872edf8e2afSMika Westerberg { 2873edf8e2afSMika Westerberg struct vm_area_struct *vma; 2874edf8e2afSMika Westerberg 2875edf8e2afSMika Westerberg while ((vma = vma_first(mm)) != NULL) { 287672cf2d4fSBlue Swirl QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link); 28777267c094SAnthony Liguori g_free(vma); 2878edf8e2afSMika Westerberg } 28797267c094SAnthony Liguori g_free(mm); 2880edf8e2afSMika Westerberg } 2881edf8e2afSMika Westerberg 28821a1c4db9SMikhail Ilyin static int vma_add_mapping(struct mm_struct *mm, target_ulong start, 28831a1c4db9SMikhail Ilyin target_ulong end, abi_ulong flags) 2884edf8e2afSMika Westerberg { 2885edf8e2afSMika Westerberg struct vm_area_struct *vma; 2886edf8e2afSMika Westerberg 28877267c094SAnthony Liguori if ((vma = g_malloc0(sizeof (*vma))) == NULL) 2888edf8e2afSMika Westerberg return (-1); 2889edf8e2afSMika Westerberg 2890edf8e2afSMika Westerberg vma->vma_start = start; 2891edf8e2afSMika Westerberg vma->vma_end = end; 2892edf8e2afSMika Westerberg vma->vma_flags = flags; 2893edf8e2afSMika Westerberg 289472cf2d4fSBlue Swirl QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link); 2895edf8e2afSMika Westerberg mm->mm_count++; 2896edf8e2afSMika Westerberg 2897edf8e2afSMika Westerberg return (0); 2898edf8e2afSMika Westerberg } 2899edf8e2afSMika Westerberg 2900edf8e2afSMika Westerberg 
static struct vm_area_struct *vma_first(const struct mm_struct *mm) 2901edf8e2afSMika Westerberg { 290272cf2d4fSBlue Swirl return (QTAILQ_FIRST(&mm->mm_mmap)); 2903edf8e2afSMika Westerberg } 2904edf8e2afSMika Westerberg 2905edf8e2afSMika Westerberg static struct vm_area_struct *vma_next(struct vm_area_struct *vma) 2906edf8e2afSMika Westerberg { 290772cf2d4fSBlue Swirl return (QTAILQ_NEXT(vma, vma_link)); 2908edf8e2afSMika Westerberg } 2909edf8e2afSMika Westerberg 2910edf8e2afSMika Westerberg static int vma_get_mapping_count(const struct mm_struct *mm) 2911edf8e2afSMika Westerberg { 2912edf8e2afSMika Westerberg return (mm->mm_count); 2913edf8e2afSMika Westerberg } 2914edf8e2afSMika Westerberg 2915edf8e2afSMika Westerberg /* 2916edf8e2afSMika Westerberg * Calculate file (dump) size of given memory region. 2917edf8e2afSMika Westerberg */ 2918edf8e2afSMika Westerberg static abi_ulong vma_dump_size(const struct vm_area_struct *vma) 2919edf8e2afSMika Westerberg { 2920edf8e2afSMika Westerberg /* if we cannot even read the first page, skip it */ 2921edf8e2afSMika Westerberg if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE)) 2922edf8e2afSMika Westerberg return (0); 2923edf8e2afSMika Westerberg 2924edf8e2afSMika Westerberg /* 2925edf8e2afSMika Westerberg * Usually we don't dump executable pages as they contain 2926edf8e2afSMika Westerberg * non-writable code that debugger can read directly from 2927edf8e2afSMika Westerberg * target library etc. However, thread stacks are marked 2928edf8e2afSMika Westerberg * also executable so we read in first page of given region 2929edf8e2afSMika Westerberg * and check whether it contains elf header. If there is 2930edf8e2afSMika Westerberg * no elf header, we dump it. 
2931edf8e2afSMika Westerberg */ 2932edf8e2afSMika Westerberg if (vma->vma_flags & PROT_EXEC) { 2933edf8e2afSMika Westerberg char page[TARGET_PAGE_SIZE]; 2934edf8e2afSMika Westerberg 2935edf8e2afSMika Westerberg copy_from_user(page, vma->vma_start, sizeof (page)); 2936edf8e2afSMika Westerberg if ((page[EI_MAG0] == ELFMAG0) && 2937edf8e2afSMika Westerberg (page[EI_MAG1] == ELFMAG1) && 2938edf8e2afSMika Westerberg (page[EI_MAG2] == ELFMAG2) && 2939edf8e2afSMika Westerberg (page[EI_MAG3] == ELFMAG3)) { 2940edf8e2afSMika Westerberg /* 2941edf8e2afSMika Westerberg * Mappings are possibly from ELF binary. Don't dump 2942edf8e2afSMika Westerberg * them. 2943edf8e2afSMika Westerberg */ 2944edf8e2afSMika Westerberg return (0); 2945edf8e2afSMika Westerberg } 2946edf8e2afSMika Westerberg } 2947edf8e2afSMika Westerberg 2948edf8e2afSMika Westerberg return (vma->vma_end - vma->vma_start); 2949edf8e2afSMika Westerberg } 2950edf8e2afSMika Westerberg 29511a1c4db9SMikhail Ilyin static int vma_walker(void *priv, target_ulong start, target_ulong end, 2952edf8e2afSMika Westerberg unsigned long flags) 2953edf8e2afSMika Westerberg { 2954edf8e2afSMika Westerberg struct mm_struct *mm = (struct mm_struct *)priv; 2955edf8e2afSMika Westerberg 2956edf8e2afSMika Westerberg vma_add_mapping(mm, start, end, flags); 2957edf8e2afSMika Westerberg return (0); 2958edf8e2afSMika Westerberg } 2959edf8e2afSMika Westerberg 2960edf8e2afSMika Westerberg static void fill_note(struct memelfnote *note, const char *name, int type, 2961edf8e2afSMika Westerberg unsigned int sz, void *data) 2962edf8e2afSMika Westerberg { 2963edf8e2afSMika Westerberg unsigned int namesz; 2964edf8e2afSMika Westerberg 2965edf8e2afSMika Westerberg namesz = strlen(name) + 1; 2966edf8e2afSMika Westerberg note->name = name; 2967edf8e2afSMika Westerberg note->namesz = namesz; 2968edf8e2afSMika Westerberg note->namesz_rounded = roundup(namesz, sizeof (int32_t)); 2969edf8e2afSMika Westerberg note->type = type; 297080f5ce75SLaurent Vivier 
note->datasz = sz; 297180f5ce75SLaurent Vivier note->datasz_rounded = roundup(sz, sizeof (int32_t)); 297280f5ce75SLaurent Vivier 2973edf8e2afSMika Westerberg note->data = data; 2974edf8e2afSMika Westerberg 2975edf8e2afSMika Westerberg /* 2976edf8e2afSMika Westerberg * We calculate rounded up note size here as specified by 2977edf8e2afSMika Westerberg * ELF document. 2978edf8e2afSMika Westerberg */ 2979edf8e2afSMika Westerberg note->notesz = sizeof (struct elf_note) + 298080f5ce75SLaurent Vivier note->namesz_rounded + note->datasz_rounded; 2981edf8e2afSMika Westerberg } 2982edf8e2afSMika Westerberg 2983edf8e2afSMika Westerberg static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine, 2984edf8e2afSMika Westerberg uint32_t flags) 2985edf8e2afSMika Westerberg { 2986edf8e2afSMika Westerberg (void) memset(elf, 0, sizeof(*elf)); 2987edf8e2afSMika Westerberg 2988edf8e2afSMika Westerberg (void) memcpy(elf->e_ident, ELFMAG, SELFMAG); 2989edf8e2afSMika Westerberg elf->e_ident[EI_CLASS] = ELF_CLASS; 2990edf8e2afSMika Westerberg elf->e_ident[EI_DATA] = ELF_DATA; 2991edf8e2afSMika Westerberg elf->e_ident[EI_VERSION] = EV_CURRENT; 2992edf8e2afSMika Westerberg elf->e_ident[EI_OSABI] = ELF_OSABI; 2993edf8e2afSMika Westerberg 2994edf8e2afSMika Westerberg elf->e_type = ET_CORE; 2995edf8e2afSMika Westerberg elf->e_machine = machine; 2996edf8e2afSMika Westerberg elf->e_version = EV_CURRENT; 2997edf8e2afSMika Westerberg elf->e_phoff = sizeof(struct elfhdr); 2998edf8e2afSMika Westerberg elf->e_flags = flags; 2999edf8e2afSMika Westerberg elf->e_ehsize = sizeof(struct elfhdr); 3000edf8e2afSMika Westerberg elf->e_phentsize = sizeof(struct elf_phdr); 3001edf8e2afSMika Westerberg elf->e_phnum = segs; 3002edf8e2afSMika Westerberg 3003edf8e2afSMika Westerberg bswap_ehdr(elf); 3004edf8e2afSMika Westerberg } 3005edf8e2afSMika Westerberg 3006edf8e2afSMika Westerberg static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset) 3007edf8e2afSMika Westerberg { 
3008edf8e2afSMika Westerberg phdr->p_type = PT_NOTE; 3009edf8e2afSMika Westerberg phdr->p_offset = offset; 3010edf8e2afSMika Westerberg phdr->p_vaddr = 0; 3011edf8e2afSMika Westerberg phdr->p_paddr = 0; 3012edf8e2afSMika Westerberg phdr->p_filesz = sz; 3013edf8e2afSMika Westerberg phdr->p_memsz = 0; 3014edf8e2afSMika Westerberg phdr->p_flags = 0; 3015edf8e2afSMika Westerberg phdr->p_align = 0; 3016edf8e2afSMika Westerberg 3017991f8f0cSRichard Henderson bswap_phdr(phdr, 1); 3018edf8e2afSMika Westerberg } 3019edf8e2afSMika Westerberg 3020edf8e2afSMika Westerberg static size_t note_size(const struct memelfnote *note) 3021edf8e2afSMika Westerberg { 3022edf8e2afSMika Westerberg return (note->notesz); 3023edf8e2afSMika Westerberg } 3024edf8e2afSMika Westerberg 3025a2547a13SLaurent Desnogues static void fill_prstatus(struct target_elf_prstatus *prstatus, 3026edf8e2afSMika Westerberg const TaskState *ts, int signr) 3027edf8e2afSMika Westerberg { 3028edf8e2afSMika Westerberg (void) memset(prstatus, 0, sizeof (*prstatus)); 3029edf8e2afSMika Westerberg prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; 3030edf8e2afSMika Westerberg prstatus->pr_pid = ts->ts_tid; 3031edf8e2afSMika Westerberg prstatus->pr_ppid = getppid(); 3032edf8e2afSMika Westerberg prstatus->pr_pgrp = getpgrp(); 3033edf8e2afSMika Westerberg prstatus->pr_sid = getsid(0); 3034edf8e2afSMika Westerberg 3035edf8e2afSMika Westerberg bswap_prstatus(prstatus); 3036edf8e2afSMika Westerberg } 3037edf8e2afSMika Westerberg 3038a2547a13SLaurent Desnogues static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts) 3039edf8e2afSMika Westerberg { 3040900cfbcaSJim Meyering char *base_filename; 3041edf8e2afSMika Westerberg unsigned int i, len; 3042edf8e2afSMika Westerberg 3043edf8e2afSMika Westerberg (void) memset(psinfo, 0, sizeof (*psinfo)); 3044edf8e2afSMika Westerberg 3045edf8e2afSMika Westerberg len = ts->info->arg_end - ts->info->arg_start; 3046edf8e2afSMika Westerberg if (len >= ELF_PRARGSZ) 
3047edf8e2afSMika Westerberg len = ELF_PRARGSZ - 1; 3048edf8e2afSMika Westerberg if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len)) 3049edf8e2afSMika Westerberg return -EFAULT; 3050edf8e2afSMika Westerberg for (i = 0; i < len; i++) 3051edf8e2afSMika Westerberg if (psinfo->pr_psargs[i] == 0) 3052edf8e2afSMika Westerberg psinfo->pr_psargs[i] = ' '; 3053edf8e2afSMika Westerberg psinfo->pr_psargs[len] = 0; 3054edf8e2afSMika Westerberg 3055edf8e2afSMika Westerberg psinfo->pr_pid = getpid(); 3056edf8e2afSMika Westerberg psinfo->pr_ppid = getppid(); 3057edf8e2afSMika Westerberg psinfo->pr_pgrp = getpgrp(); 3058edf8e2afSMika Westerberg psinfo->pr_sid = getsid(0); 3059edf8e2afSMika Westerberg psinfo->pr_uid = getuid(); 3060edf8e2afSMika Westerberg psinfo->pr_gid = getgid(); 3061edf8e2afSMika Westerberg 3062900cfbcaSJim Meyering base_filename = g_path_get_basename(ts->bprm->filename); 3063900cfbcaSJim Meyering /* 3064900cfbcaSJim Meyering * Using strncpy here is fine: at max-length, 3065900cfbcaSJim Meyering * this field is not NUL-terminated. 3066900cfbcaSJim Meyering */ 3067edf8e2afSMika Westerberg (void) strncpy(psinfo->pr_fname, base_filename, 3068edf8e2afSMika Westerberg sizeof(psinfo->pr_fname)); 3069edf8e2afSMika Westerberg 3070900cfbcaSJim Meyering g_free(base_filename); 3071edf8e2afSMika Westerberg bswap_psinfo(psinfo); 3072edf8e2afSMika Westerberg return (0); 3073edf8e2afSMika Westerberg } 3074edf8e2afSMika Westerberg 3075edf8e2afSMika Westerberg static void fill_auxv_note(struct memelfnote *note, const TaskState *ts) 3076edf8e2afSMika Westerberg { 3077edf8e2afSMika Westerberg elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv; 3078edf8e2afSMika Westerberg elf_addr_t orig_auxv = auxv; 3079edf8e2afSMika Westerberg void *ptr; 3080125b0f55SAlexander Graf int len = ts->info->auxv_len; 3081edf8e2afSMika Westerberg 3082edf8e2afSMika Westerberg /* 3083edf8e2afSMika Westerberg * Auxiliary vector is stored in target process stack. 
It contains 3084edf8e2afSMika Westerberg * {type, value} pairs that we need to dump into note. This is not 3085edf8e2afSMika Westerberg * strictly necessary but we do it here for sake of completeness. 3086edf8e2afSMika Westerberg */ 3087edf8e2afSMika Westerberg 3088edf8e2afSMika Westerberg /* read in whole auxv vector and copy it to memelfnote */ 3089edf8e2afSMika Westerberg ptr = lock_user(VERIFY_READ, orig_auxv, len, 0); 3090edf8e2afSMika Westerberg if (ptr != NULL) { 3091edf8e2afSMika Westerberg fill_note(note, "CORE", NT_AUXV, len, ptr); 3092edf8e2afSMika Westerberg unlock_user(ptr, auxv, len); 3093edf8e2afSMika Westerberg } 3094edf8e2afSMika Westerberg } 3095edf8e2afSMika Westerberg 3096edf8e2afSMika Westerberg /* 3097edf8e2afSMika Westerberg * Constructs name of coredump file. We have following convention 3098edf8e2afSMika Westerberg * for the name: 3099edf8e2afSMika Westerberg * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core 3100edf8e2afSMika Westerberg * 3101edf8e2afSMika Westerberg * Returns 0 in case of success, -1 otherwise (errno is set). 
3102edf8e2afSMika Westerberg */ 3103edf8e2afSMika Westerberg static int core_dump_filename(const TaskState *ts, char *buf, 3104edf8e2afSMika Westerberg size_t bufsize) 3105edf8e2afSMika Westerberg { 3106edf8e2afSMika Westerberg char timestamp[64]; 3107edf8e2afSMika Westerberg char *base_filename = NULL; 3108edf8e2afSMika Westerberg struct timeval tv; 3109edf8e2afSMika Westerberg struct tm tm; 3110edf8e2afSMika Westerberg 3111edf8e2afSMika Westerberg assert(bufsize >= PATH_MAX); 3112edf8e2afSMika Westerberg 3113edf8e2afSMika Westerberg if (gettimeofday(&tv, NULL) < 0) { 3114edf8e2afSMika Westerberg (void) fprintf(stderr, "unable to get current timestamp: %s", 3115edf8e2afSMika Westerberg strerror(errno)); 3116edf8e2afSMika Westerberg return (-1); 3117edf8e2afSMika Westerberg } 3118edf8e2afSMika Westerberg 3119b8da57faSWei Jiangang base_filename = g_path_get_basename(ts->bprm->filename); 3120edf8e2afSMika Westerberg (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S", 3121edf8e2afSMika Westerberg localtime_r(&tv.tv_sec, &tm)); 3122edf8e2afSMika Westerberg (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core", 3123edf8e2afSMika Westerberg base_filename, timestamp, (int)getpid()); 3124b8da57faSWei Jiangang g_free(base_filename); 3125edf8e2afSMika Westerberg 3126edf8e2afSMika Westerberg return (0); 3127edf8e2afSMika Westerberg } 3128edf8e2afSMika Westerberg 3129edf8e2afSMika Westerberg static int dump_write(int fd, const void *ptr, size_t size) 3130edf8e2afSMika Westerberg { 3131edf8e2afSMika Westerberg const char *bufp = (const char *)ptr; 3132edf8e2afSMika Westerberg ssize_t bytes_written, bytes_left; 3133edf8e2afSMika Westerberg struct rlimit dumpsize; 3134edf8e2afSMika Westerberg off_t pos; 3135edf8e2afSMika Westerberg 3136edf8e2afSMika Westerberg bytes_written = 0; 3137edf8e2afSMika Westerberg getrlimit(RLIMIT_CORE, &dumpsize); 3138edf8e2afSMika Westerberg if ((pos = lseek(fd, 0, SEEK_CUR))==-1) { 3139edf8e2afSMika Westerberg if (errno == ESPIPE) { /* not a 
seekable stream */ 3140edf8e2afSMika Westerberg bytes_left = size; 3141edf8e2afSMika Westerberg } else { 3142edf8e2afSMika Westerberg return pos; 3143edf8e2afSMika Westerberg } 3144edf8e2afSMika Westerberg } else { 3145edf8e2afSMika Westerberg if (dumpsize.rlim_cur <= pos) { 3146edf8e2afSMika Westerberg return -1; 3147edf8e2afSMika Westerberg } else if (dumpsize.rlim_cur == RLIM_INFINITY) { 3148edf8e2afSMika Westerberg bytes_left = size; 3149edf8e2afSMika Westerberg } else { 3150edf8e2afSMika Westerberg size_t limit_left=dumpsize.rlim_cur - pos; 3151edf8e2afSMika Westerberg bytes_left = limit_left >= size ? size : limit_left ; 3152edf8e2afSMika Westerberg } 3153edf8e2afSMika Westerberg } 3154edf8e2afSMika Westerberg 3155edf8e2afSMika Westerberg /* 3156edf8e2afSMika Westerberg * In normal conditions, single write(2) should do but 3157edf8e2afSMika Westerberg * in case of socket etc. this mechanism is more portable. 3158edf8e2afSMika Westerberg */ 3159edf8e2afSMika Westerberg do { 3160edf8e2afSMika Westerberg bytes_written = write(fd, bufp, bytes_left); 3161edf8e2afSMika Westerberg if (bytes_written < 0) { 3162edf8e2afSMika Westerberg if (errno == EINTR) 3163edf8e2afSMika Westerberg continue; 3164edf8e2afSMika Westerberg return (-1); 3165edf8e2afSMika Westerberg } else if (bytes_written == 0) { /* eof */ 3166edf8e2afSMika Westerberg return (-1); 3167edf8e2afSMika Westerberg } 3168edf8e2afSMika Westerberg bufp += bytes_written; 3169edf8e2afSMika Westerberg bytes_left -= bytes_written; 3170edf8e2afSMika Westerberg } while (bytes_left > 0); 3171edf8e2afSMika Westerberg 3172edf8e2afSMika Westerberg return (0); 3173edf8e2afSMika Westerberg } 3174edf8e2afSMika Westerberg 3175edf8e2afSMika Westerberg static int write_note(struct memelfnote *men, int fd) 3176edf8e2afSMika Westerberg { 3177edf8e2afSMika Westerberg struct elf_note en; 3178edf8e2afSMika Westerberg 3179edf8e2afSMika Westerberg en.n_namesz = men->namesz; 3180edf8e2afSMika Westerberg en.n_type = men->type; 
3181edf8e2afSMika Westerberg en.n_descsz = men->datasz; 3182edf8e2afSMika Westerberg 3183edf8e2afSMika Westerberg bswap_note(&en); 3184edf8e2afSMika Westerberg 3185edf8e2afSMika Westerberg if (dump_write(fd, &en, sizeof(en)) != 0) 3186edf8e2afSMika Westerberg return (-1); 3187edf8e2afSMika Westerberg if (dump_write(fd, men->name, men->namesz_rounded) != 0) 3188edf8e2afSMika Westerberg return (-1); 318980f5ce75SLaurent Vivier if (dump_write(fd, men->data, men->datasz_rounded) != 0) 3190edf8e2afSMika Westerberg return (-1); 3191edf8e2afSMika Westerberg 3192edf8e2afSMika Westerberg return (0); 3193edf8e2afSMika Westerberg } 3194edf8e2afSMika Westerberg 31959349b4f9SAndreas Färber static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env) 3196edf8e2afSMika Westerberg { 31970429a971SAndreas Färber CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 31980429a971SAndreas Färber TaskState *ts = (TaskState *)cpu->opaque; 3199edf8e2afSMika Westerberg struct elf_thread_status *ets; 3200edf8e2afSMika Westerberg 32017267c094SAnthony Liguori ets = g_malloc0(sizeof (*ets)); 3202edf8e2afSMika Westerberg ets->num_notes = 1; /* only prstatus is dumped */ 3203edf8e2afSMika Westerberg fill_prstatus(&ets->prstatus, ts, 0); 3204edf8e2afSMika Westerberg elf_core_copy_regs(&ets->prstatus.pr_reg, env); 3205edf8e2afSMika Westerberg fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus), 3206edf8e2afSMika Westerberg &ets->prstatus); 3207edf8e2afSMika Westerberg 320872cf2d4fSBlue Swirl QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link); 3209edf8e2afSMika Westerberg 3210edf8e2afSMika Westerberg info->notes_size += note_size(&ets->notes[0]); 3211edf8e2afSMika Westerberg } 3212edf8e2afSMika Westerberg 32136afafa86SPeter Maydell static void init_note_info(struct elf_note_info *info) 32146afafa86SPeter Maydell { 32156afafa86SPeter Maydell /* Initialize the elf_note_info structure so that it is at 32166afafa86SPeter Maydell * least safe to call 
free_note_info() on it. Must be 32176afafa86SPeter Maydell * called before calling fill_note_info(). 32186afafa86SPeter Maydell */ 32196afafa86SPeter Maydell memset(info, 0, sizeof (*info)); 32206afafa86SPeter Maydell QTAILQ_INIT(&info->thread_list); 32216afafa86SPeter Maydell } 32226afafa86SPeter Maydell 3223edf8e2afSMika Westerberg static int fill_note_info(struct elf_note_info *info, 32249349b4f9SAndreas Färber long signr, const CPUArchState *env) 3225edf8e2afSMika Westerberg { 3226edf8e2afSMika Westerberg #define NUMNOTES 3 32270429a971SAndreas Färber CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 32280429a971SAndreas Färber TaskState *ts = (TaskState *)cpu->opaque; 3229edf8e2afSMika Westerberg int i; 3230edf8e2afSMika Westerberg 3231c78d65e8SMarkus Armbruster info->notes = g_new0(struct memelfnote, NUMNOTES); 3232edf8e2afSMika Westerberg if (info->notes == NULL) 3233edf8e2afSMika Westerberg return (-ENOMEM); 32347267c094SAnthony Liguori info->prstatus = g_malloc0(sizeof (*info->prstatus)); 3235edf8e2afSMika Westerberg if (info->prstatus == NULL) 3236edf8e2afSMika Westerberg return (-ENOMEM); 32377267c094SAnthony Liguori info->psinfo = g_malloc0(sizeof (*info->psinfo)); 3238edf8e2afSMika Westerberg if (info->prstatus == NULL) 3239edf8e2afSMika Westerberg return (-ENOMEM); 3240edf8e2afSMika Westerberg 3241edf8e2afSMika Westerberg /* 3242edf8e2afSMika Westerberg * First fill in status (and registers) of current thread 3243edf8e2afSMika Westerberg * including process info & aux vector. 
3244edf8e2afSMika Westerberg */ 3245edf8e2afSMika Westerberg fill_prstatus(info->prstatus, ts, signr); 3246edf8e2afSMika Westerberg elf_core_copy_regs(&info->prstatus->pr_reg, env); 3247edf8e2afSMika Westerberg fill_note(&info->notes[0], "CORE", NT_PRSTATUS, 3248edf8e2afSMika Westerberg sizeof (*info->prstatus), info->prstatus); 3249edf8e2afSMika Westerberg fill_psinfo(info->psinfo, ts); 3250edf8e2afSMika Westerberg fill_note(&info->notes[1], "CORE", NT_PRPSINFO, 3251edf8e2afSMika Westerberg sizeof (*info->psinfo), info->psinfo); 3252edf8e2afSMika Westerberg fill_auxv_note(&info->notes[2], ts); 3253edf8e2afSMika Westerberg info->numnote = 3; 3254edf8e2afSMika Westerberg 3255edf8e2afSMika Westerberg info->notes_size = 0; 3256edf8e2afSMika Westerberg for (i = 0; i < info->numnote; i++) 3257edf8e2afSMika Westerberg info->notes_size += note_size(&info->notes[i]); 3258edf8e2afSMika Westerberg 3259edf8e2afSMika Westerberg /* read and fill status of all threads */ 3260edf8e2afSMika Westerberg cpu_list_lock(); 3261bdc44640SAndreas Färber CPU_FOREACH(cpu) { 3262a2247f8eSAndreas Färber if (cpu == thread_cpu) { 3263edf8e2afSMika Westerberg continue; 3264182735efSAndreas Färber } 3265182735efSAndreas Färber fill_thread_info(info, (CPUArchState *)cpu->env_ptr); 3266edf8e2afSMika Westerberg } 3267edf8e2afSMika Westerberg cpu_list_unlock(); 3268edf8e2afSMika Westerberg 3269edf8e2afSMika Westerberg return (0); 3270edf8e2afSMika Westerberg } 3271edf8e2afSMika Westerberg 3272edf8e2afSMika Westerberg static void free_note_info(struct elf_note_info *info) 3273edf8e2afSMika Westerberg { 3274edf8e2afSMika Westerberg struct elf_thread_status *ets; 3275edf8e2afSMika Westerberg 327672cf2d4fSBlue Swirl while (!QTAILQ_EMPTY(&info->thread_list)) { 327772cf2d4fSBlue Swirl ets = QTAILQ_FIRST(&info->thread_list); 327872cf2d4fSBlue Swirl QTAILQ_REMOVE(&info->thread_list, ets, ets_link); 32797267c094SAnthony Liguori g_free(ets); 3280edf8e2afSMika Westerberg } 3281edf8e2afSMika Westerberg 
32827267c094SAnthony Liguori g_free(info->prstatus); 32837267c094SAnthony Liguori g_free(info->psinfo); 32847267c094SAnthony Liguori g_free(info->notes); 3285edf8e2afSMika Westerberg } 3286edf8e2afSMika Westerberg 3287edf8e2afSMika Westerberg static int write_note_info(struct elf_note_info *info, int fd) 3288edf8e2afSMika Westerberg { 3289edf8e2afSMika Westerberg struct elf_thread_status *ets; 3290edf8e2afSMika Westerberg int i, error = 0; 3291edf8e2afSMika Westerberg 3292edf8e2afSMika Westerberg /* write prstatus, psinfo and auxv for current thread */ 3293edf8e2afSMika Westerberg for (i = 0; i < info->numnote; i++) 3294edf8e2afSMika Westerberg if ((error = write_note(&info->notes[i], fd)) != 0) 3295edf8e2afSMika Westerberg return (error); 3296edf8e2afSMika Westerberg 3297edf8e2afSMika Westerberg /* write prstatus for each thread */ 329852a53afeSEmilio G. Cota QTAILQ_FOREACH(ets, &info->thread_list, ets_link) { 3299edf8e2afSMika Westerberg if ((error = write_note(&ets->notes[0], fd)) != 0) 3300edf8e2afSMika Westerberg return (error); 3301edf8e2afSMika Westerberg } 3302edf8e2afSMika Westerberg 3303edf8e2afSMika Westerberg return (0); 3304edf8e2afSMika Westerberg } 3305edf8e2afSMika Westerberg 3306edf8e2afSMika Westerberg /* 3307edf8e2afSMika Westerberg * Write out ELF coredump. 
3308edf8e2afSMika Westerberg * 3309edf8e2afSMika Westerberg * See documentation of ELF object file format in: 3310edf8e2afSMika Westerberg * http://www.caldera.com/developers/devspecs/gabi41.pdf 3311edf8e2afSMika Westerberg * 3312edf8e2afSMika Westerberg * Coredump format in linux is following: 3313edf8e2afSMika Westerberg * 3314edf8e2afSMika Westerberg * 0 +----------------------+ \ 3315edf8e2afSMika Westerberg * | ELF header | ET_CORE | 3316edf8e2afSMika Westerberg * +----------------------+ | 3317edf8e2afSMika Westerberg * | ELF program headers | |--- headers 3318edf8e2afSMika Westerberg * | - NOTE section | | 3319edf8e2afSMika Westerberg * | - PT_LOAD sections | | 3320edf8e2afSMika Westerberg * +----------------------+ / 3321edf8e2afSMika Westerberg * | NOTEs: | 3322edf8e2afSMika Westerberg * | - NT_PRSTATUS | 3323edf8e2afSMika Westerberg * | - NT_PRSINFO | 3324edf8e2afSMika Westerberg * | - NT_AUXV | 3325edf8e2afSMika Westerberg * +----------------------+ <-- aligned to target page 3326edf8e2afSMika Westerberg * | Process memory dump | 3327edf8e2afSMika Westerberg * : : 3328edf8e2afSMika Westerberg * . . 3329edf8e2afSMika Westerberg * : : 3330edf8e2afSMika Westerberg * | | 3331edf8e2afSMika Westerberg * +----------------------+ 3332edf8e2afSMika Westerberg * 3333edf8e2afSMika Westerberg * NT_PRSTATUS -> struct elf_prstatus (per thread) 3334edf8e2afSMika Westerberg * NT_PRSINFO -> struct elf_prpsinfo 3335edf8e2afSMika Westerberg * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()). 3336edf8e2afSMika Westerberg * 3337edf8e2afSMika Westerberg * Format follows System V format as close as possible. Current 3338edf8e2afSMika Westerberg * version limitations are as follows: 3339edf8e2afSMika Westerberg * - no floating point registers are dumped 3340edf8e2afSMika Westerberg * 3341edf8e2afSMika Westerberg * Function returns 0 in case of success, negative errno otherwise. 
3342edf8e2afSMika Westerberg * 3343edf8e2afSMika Westerberg * TODO: make this work also during runtime: it should be 3344edf8e2afSMika Westerberg * possible to force coredump from running process and then 3345edf8e2afSMika Westerberg * continue processing. For example qemu could set up SIGUSR2 3346edf8e2afSMika Westerberg * handler (provided that target process haven't registered 3347edf8e2afSMika Westerberg * handler for that) that does the dump when signal is received. 3348edf8e2afSMika Westerberg */ 33499349b4f9SAndreas Färber static int elf_core_dump(int signr, const CPUArchState *env) 3350edf8e2afSMika Westerberg { 33510429a971SAndreas Färber const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 33520429a971SAndreas Färber const TaskState *ts = (const TaskState *)cpu->opaque; 3353edf8e2afSMika Westerberg struct vm_area_struct *vma = NULL; 3354edf8e2afSMika Westerberg char corefile[PATH_MAX]; 3355edf8e2afSMika Westerberg struct elf_note_info info; 3356edf8e2afSMika Westerberg struct elfhdr elf; 3357edf8e2afSMika Westerberg struct elf_phdr phdr; 3358edf8e2afSMika Westerberg struct rlimit dumpsize; 3359edf8e2afSMika Westerberg struct mm_struct *mm = NULL; 3360edf8e2afSMika Westerberg off_t offset = 0, data_offset = 0; 3361edf8e2afSMika Westerberg int segs = 0; 3362edf8e2afSMika Westerberg int fd = -1; 3363edf8e2afSMika Westerberg 33646afafa86SPeter Maydell init_note_info(&info); 33656afafa86SPeter Maydell 3366edf8e2afSMika Westerberg errno = 0; 3367edf8e2afSMika Westerberg getrlimit(RLIMIT_CORE, &dumpsize); 3368edf8e2afSMika Westerberg if (dumpsize.rlim_cur == 0) 3369edf8e2afSMika Westerberg return 0; 3370edf8e2afSMika Westerberg 3371edf8e2afSMika Westerberg if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0) 3372edf8e2afSMika Westerberg return (-errno); 3373edf8e2afSMika Westerberg 3374edf8e2afSMika Westerberg if ((fd = open(corefile, O_WRONLY | O_CREAT, 3375edf8e2afSMika Westerberg S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0) 3376edf8e2afSMika Westerberg 
return (-errno); 3377edf8e2afSMika Westerberg 3378edf8e2afSMika Westerberg /* 3379edf8e2afSMika Westerberg * Walk through target process memory mappings and 3380edf8e2afSMika Westerberg * set up structure containing this information. After 3381edf8e2afSMika Westerberg * this point vma_xxx functions can be used. 3382edf8e2afSMika Westerberg */ 3383edf8e2afSMika Westerberg if ((mm = vma_init()) == NULL) 3384edf8e2afSMika Westerberg goto out; 3385edf8e2afSMika Westerberg 3386edf8e2afSMika Westerberg walk_memory_regions(mm, vma_walker); 3387edf8e2afSMika Westerberg segs = vma_get_mapping_count(mm); 3388edf8e2afSMika Westerberg 3389edf8e2afSMika Westerberg /* 3390edf8e2afSMika Westerberg * Construct valid coredump ELF header. We also 3391edf8e2afSMika Westerberg * add one more segment for notes. 3392edf8e2afSMika Westerberg */ 3393edf8e2afSMika Westerberg fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0); 3394edf8e2afSMika Westerberg if (dump_write(fd, &elf, sizeof (elf)) != 0) 3395edf8e2afSMika Westerberg goto out; 3396edf8e2afSMika Westerberg 3397b6af0975SDaniel P. 
Berrange /* fill in the in-memory version of notes */ 3398edf8e2afSMika Westerberg if (fill_note_info(&info, signr, env) < 0) 3399edf8e2afSMika Westerberg goto out; 3400edf8e2afSMika Westerberg 3401edf8e2afSMika Westerberg offset += sizeof (elf); /* elf header */ 3402edf8e2afSMika Westerberg offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */ 3403edf8e2afSMika Westerberg 3404edf8e2afSMika Westerberg /* write out notes program header */ 3405edf8e2afSMika Westerberg fill_elf_note_phdr(&phdr, info.notes_size, offset); 3406edf8e2afSMika Westerberg 3407edf8e2afSMika Westerberg offset += info.notes_size; 3408edf8e2afSMika Westerberg if (dump_write(fd, &phdr, sizeof (phdr)) != 0) 3409edf8e2afSMika Westerberg goto out; 3410edf8e2afSMika Westerberg 3411edf8e2afSMika Westerberg /* 3412edf8e2afSMika Westerberg * ELF specification wants data to start at page boundary so 3413edf8e2afSMika Westerberg * we align it here. 3414edf8e2afSMika Westerberg */ 341580f5ce75SLaurent Vivier data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE); 3416edf8e2afSMika Westerberg 3417edf8e2afSMika Westerberg /* 3418edf8e2afSMika Westerberg * Write program headers for memory regions mapped in 3419edf8e2afSMika Westerberg * the target process. 3420edf8e2afSMika Westerberg */ 3421edf8e2afSMika Westerberg for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) { 3422edf8e2afSMika Westerberg (void) memset(&phdr, 0, sizeof (phdr)); 3423edf8e2afSMika Westerberg 3424edf8e2afSMika Westerberg phdr.p_type = PT_LOAD; 3425edf8e2afSMika Westerberg phdr.p_offset = offset; 3426edf8e2afSMika Westerberg phdr.p_vaddr = vma->vma_start; 3427edf8e2afSMika Westerberg phdr.p_paddr = 0; 3428edf8e2afSMika Westerberg phdr.p_filesz = vma_dump_size(vma); 3429edf8e2afSMika Westerberg offset += phdr.p_filesz; 3430edf8e2afSMika Westerberg phdr.p_memsz = vma->vma_end - vma->vma_start; 3431edf8e2afSMika Westerberg phdr.p_flags = vma->vma_flags & PROT_READ ? 
PF_R : 0; 3432edf8e2afSMika Westerberg if (vma->vma_flags & PROT_WRITE) 3433edf8e2afSMika Westerberg phdr.p_flags |= PF_W; 3434edf8e2afSMika Westerberg if (vma->vma_flags & PROT_EXEC) 3435edf8e2afSMika Westerberg phdr.p_flags |= PF_X; 3436edf8e2afSMika Westerberg phdr.p_align = ELF_EXEC_PAGESIZE; 3437edf8e2afSMika Westerberg 343880f5ce75SLaurent Vivier bswap_phdr(&phdr, 1); 3439772034b6SPeter Maydell if (dump_write(fd, &phdr, sizeof(phdr)) != 0) { 3440772034b6SPeter Maydell goto out; 3441772034b6SPeter Maydell } 3442edf8e2afSMika Westerberg } 3443edf8e2afSMika Westerberg 3444edf8e2afSMika Westerberg /* 3445edf8e2afSMika Westerberg * Next we write notes just after program headers. No 3446edf8e2afSMika Westerberg * alignment needed here. 3447edf8e2afSMika Westerberg */ 3448edf8e2afSMika Westerberg if (write_note_info(&info, fd) < 0) 3449edf8e2afSMika Westerberg goto out; 3450edf8e2afSMika Westerberg 3451edf8e2afSMika Westerberg /* align data to page boundary */ 3452edf8e2afSMika Westerberg if (lseek(fd, data_offset, SEEK_SET) != data_offset) 3453edf8e2afSMika Westerberg goto out; 3454edf8e2afSMika Westerberg 3455edf8e2afSMika Westerberg /* 3456edf8e2afSMika Westerberg * Finally we can dump process memory into corefile as well. 
3457edf8e2afSMika Westerberg */ 3458edf8e2afSMika Westerberg for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) { 3459edf8e2afSMika Westerberg abi_ulong addr; 3460edf8e2afSMika Westerberg abi_ulong end; 3461edf8e2afSMika Westerberg 3462edf8e2afSMika Westerberg end = vma->vma_start + vma_dump_size(vma); 3463edf8e2afSMika Westerberg 3464edf8e2afSMika Westerberg for (addr = vma->vma_start; addr < end; 3465edf8e2afSMika Westerberg addr += TARGET_PAGE_SIZE) { 3466edf8e2afSMika Westerberg char page[TARGET_PAGE_SIZE]; 3467edf8e2afSMika Westerberg int error; 3468edf8e2afSMika Westerberg 3469edf8e2afSMika Westerberg /* 3470edf8e2afSMika Westerberg * Read in page from target process memory and 3471edf8e2afSMika Westerberg * write it to coredump file. 3472edf8e2afSMika Westerberg */ 3473edf8e2afSMika Westerberg error = copy_from_user(page, addr, sizeof (page)); 3474edf8e2afSMika Westerberg if (error != 0) { 347549995e17SAurelien Jarno (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n", 3476edf8e2afSMika Westerberg addr); 3477edf8e2afSMika Westerberg errno = -error; 3478edf8e2afSMika Westerberg goto out; 3479edf8e2afSMika Westerberg } 3480edf8e2afSMika Westerberg if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0) 3481edf8e2afSMika Westerberg goto out; 3482edf8e2afSMika Westerberg } 3483edf8e2afSMika Westerberg } 3484edf8e2afSMika Westerberg 3485edf8e2afSMika Westerberg out: 3486edf8e2afSMika Westerberg free_note_info(&info); 3487edf8e2afSMika Westerberg if (mm != NULL) 3488edf8e2afSMika Westerberg vma_delete(mm); 3489edf8e2afSMika Westerberg (void) close(fd); 3490edf8e2afSMika Westerberg 3491edf8e2afSMika Westerberg if (errno != 0) 3492edf8e2afSMika Westerberg return (-errno); 3493edf8e2afSMika Westerberg return (0); 3494edf8e2afSMika Westerberg } 3495edf8e2afSMika Westerberg #endif /* USE_ELF_CORE_DUMP */ 3496edf8e2afSMika Westerberg 3497e5fe0c52Spbrook void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) 3498e5fe0c52Spbrook { 
3499e5fe0c52Spbrook init_thread(regs, infop); 3500e5fe0c52Spbrook } 3501