131e31b8aSbellard /* This is the Linux kernel elf-loading code, ported into user space */ 2d39594e9SPeter Maydell #include "qemu/osdep.h" 3edf8e2afSMika Westerberg #include <sys/param.h> 431e31b8aSbellard 5edf8e2afSMika Westerberg #include <sys/resource.h> 631e31b8aSbellard 73ef693a0Sbellard #include "qemu.h" 876cad711SPaolo Bonzini #include "disas/disas.h" 9f348b6d1SVeronia Bahaa #include "qemu/path.h" 1031e31b8aSbellard 11e58ffeb3Smalc #ifdef _ARCH_PPC64 12a6cc84f4Smalc #undef ARCH_DLINFO 13a6cc84f4Smalc #undef ELF_PLATFORM 14a6cc84f4Smalc #undef ELF_HWCAP 15ad6919dcSPeter Maydell #undef ELF_HWCAP2 16a6cc84f4Smalc #undef ELF_CLASS 17a6cc84f4Smalc #undef ELF_DATA 18a6cc84f4Smalc #undef ELF_ARCH 19a6cc84f4Smalc #endif 20a6cc84f4Smalc 21edf8e2afSMika Westerberg #define ELF_OSABI ELFOSABI_SYSV 22edf8e2afSMika Westerberg 23cb33da57Sblueswir1 /* from personality.h */ 24cb33da57Sblueswir1 25cb33da57Sblueswir1 /* 26cb33da57Sblueswir1 * Flags for bug emulation. 27cb33da57Sblueswir1 * 28cb33da57Sblueswir1 * These occupy the top three bytes. 29cb33da57Sblueswir1 */ 30cb33da57Sblueswir1 enum { 31cb33da57Sblueswir1 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ 32d97ef72eSRichard Henderson FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to 33d97ef72eSRichard Henderson descriptors (signal handling) */ 34cb33da57Sblueswir1 MMAP_PAGE_ZERO = 0x0100000, 35cb33da57Sblueswir1 ADDR_COMPAT_LAYOUT = 0x0200000, 36cb33da57Sblueswir1 READ_IMPLIES_EXEC = 0x0400000, 37cb33da57Sblueswir1 ADDR_LIMIT_32BIT = 0x0800000, 38cb33da57Sblueswir1 SHORT_INODE = 0x1000000, 39cb33da57Sblueswir1 WHOLE_SECONDS = 0x2000000, 40cb33da57Sblueswir1 STICKY_TIMEOUTS = 0x4000000, 41cb33da57Sblueswir1 ADDR_LIMIT_3GB = 0x8000000, 42cb33da57Sblueswir1 }; 43cb33da57Sblueswir1 44cb33da57Sblueswir1 /* 45cb33da57Sblueswir1 * Personality types. 46cb33da57Sblueswir1 * 47cb33da57Sblueswir1 * These go in the low byte. Avoid using the top bit, it will 48cb33da57Sblueswir1 * conflict with error returns. 49cb33da57Sblueswir1 */ 50cb33da57Sblueswir1 enum { 51cb33da57Sblueswir1 PER_LINUX = 0x0000, 52cb33da57Sblueswir1 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, 53cb33da57Sblueswir1 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, 54cb33da57Sblueswir1 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, 55cb33da57Sblueswir1 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, 56d97ef72eSRichard Henderson PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE, 57cb33da57Sblueswir1 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, 58cb33da57Sblueswir1 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, 59cb33da57Sblueswir1 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, 60cb33da57Sblueswir1 PER_BSD = 0x0006, 61cb33da57Sblueswir1 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, 62cb33da57Sblueswir1 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, 63cb33da57Sblueswir1 PER_LINUX32 = 0x0008, 64cb33da57Sblueswir1 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, 65cb33da57Sblueswir1 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ 66cb33da57Sblueswir1 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ 67cb33da57Sblueswir1 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ 68cb33da57Sblueswir1 PER_RISCOS = 0x000c, 69cb33da57Sblueswir1 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, 70cb33da57Sblueswir1 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, 71cb33da57Sblueswir1 PER_OSF4 = 0x000f, /* OSF/1 v4 */ 72cb33da57Sblueswir1 PER_HPUX = 0x0010, 73cb33da57Sblueswir1 PER_MASK = 0x00ff, 74cb33da57Sblueswir1 }; 75cb33da57Sblueswir1 76cb33da57Sblueswir1 /* 77cb33da57Sblueswir1 * Return the base personality without flags. 78cb33da57Sblueswir1 */ 79cb33da57Sblueswir1 #define personality(pers) (pers & PER_MASK) 80cb33da57Sblueswir1 813cb10cfaSChristophe Lyon int info_is_fdpic(struct image_info *info) 823cb10cfaSChristophe Lyon { 833cb10cfaSChristophe Lyon return info->personality == PER_LINUX_FDPIC; 843cb10cfaSChristophe Lyon } 853cb10cfaSChristophe Lyon 8683fb7adfSbellard /* this flag is uneffective under linux too, should be deleted */ 8783fb7adfSbellard #ifndef MAP_DENYWRITE 8883fb7adfSbellard #define MAP_DENYWRITE 0 8983fb7adfSbellard #endif 9083fb7adfSbellard 9183fb7adfSbellard /* should probably go in elf.h */ 9283fb7adfSbellard #ifndef ELIBBAD 9383fb7adfSbellard #define ELIBBAD 80 9483fb7adfSbellard #endif 9583fb7adfSbellard 9628490231SRichard Henderson #ifdef TARGET_WORDS_BIGENDIAN 9728490231SRichard Henderson #define ELF_DATA ELFDATA2MSB 9828490231SRichard Henderson #else 9928490231SRichard Henderson #define ELF_DATA ELFDATA2LSB 10028490231SRichard Henderson #endif 10128490231SRichard Henderson 102a29f998dSPaolo Bonzini #ifdef TARGET_ABI_MIPSN32 103918fc54cSPaolo Bonzini typedef abi_ullong target_elf_greg_t; 104918fc54cSPaolo Bonzini #define tswapreg(ptr) tswap64(ptr) 105a29f998dSPaolo Bonzini #else 106a29f998dSPaolo Bonzini typedef abi_ulong target_elf_greg_t; 107a29f998dSPaolo Bonzini #define tswapreg(ptr) tswapal(ptr) 108a29f998dSPaolo Bonzini #endif 109a29f998dSPaolo Bonzini 11021e807faSNathan Froyd #ifdef USE_UID16 1111ddd592fSPaolo Bonzini typedef abi_ushort target_uid_t; 1121ddd592fSPaolo Bonzini typedef abi_ushort target_gid_t; 11321e807faSNathan Froyd #else 114f8fd4fc4SPaolo Bonzini typedef abi_uint target_uid_t; 115f8fd4fc4SPaolo Bonzini typedef abi_uint target_gid_t; 11621e807faSNathan Froyd #endif 117f8fd4fc4SPaolo Bonzini typedef abi_int target_pid_t; 11821e807faSNathan Froyd 11930ac07d4Sbellard #ifdef TARGET_I386 12030ac07d4Sbellard 12115338fd7Sbellard #define ELF_PLATFORM get_elf_platform() 12215338fd7Sbellard 12315338fd7Sbellard static const char *get_elf_platform(void) 12415338fd7Sbellard { 12515338fd7Sbellard static char elf_platform[] = "i386"; 126a2247f8eSAndreas Färber int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL); 12715338fd7Sbellard if (family > 6) 12815338fd7Sbellard family = 6; 12915338fd7Sbellard if (family >= 3) 13015338fd7Sbellard elf_platform[1] = '0' + family; 13115338fd7Sbellard return elf_platform; 13215338fd7Sbellard } 13315338fd7Sbellard 13415338fd7Sbellard #define ELF_HWCAP get_elf_hwcap() 13515338fd7Sbellard 13615338fd7Sbellard static uint32_t get_elf_hwcap(void) 13715338fd7Sbellard { 138a2247f8eSAndreas Färber X86CPU *cpu = X86_CPU(thread_cpu); 139a2247f8eSAndreas Färber 140a2247f8eSAndreas Färber return cpu->env.features[FEAT_1_EDX]; 14115338fd7Sbellard } 14215338fd7Sbellard 14384409ddbSj_mayer #ifdef TARGET_X86_64 14484409ddbSj_mayer #define ELF_START_MMAP 0x2aaaaab000ULL 14584409ddbSj_mayer 14684409ddbSj_mayer #define ELF_CLASS ELFCLASS64 14784409ddbSj_mayer #define ELF_ARCH EM_X86_64 14884409ddbSj_mayer 14984409ddbSj_mayer static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 15084409ddbSj_mayer { 15184409ddbSj_mayer regs->rax = 0; 15284409ddbSj_mayer regs->rsp = infop->start_stack; 15384409ddbSj_mayer regs->rip = infop->entry; 15484409ddbSj_mayer } 15584409ddbSj_mayer 1569edc5d79SMika Westerberg #define ELF_NREG 27 157c227f099SAnthony Liguori typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1589edc5d79SMika Westerberg 1599edc5d79SMika Westerberg /* 1609edc5d79SMika Westerberg * Note that ELF_NREG should be 29 as there should be place for 1619edc5d79SMika Westerberg * TRAPNO and ERR "registers" as well but linux doesn't dump 1629edc5d79SMika Westerberg * those. 1639edc5d79SMika Westerberg * 1649edc5d79SMika Westerberg * See linux kernel: arch/x86/include/asm/elf.h 1659edc5d79SMika Westerberg */ 16605390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) 1679edc5d79SMika Westerberg { 1689edc5d79SMika Westerberg (*regs)[0] = env->regs[15]; 1699edc5d79SMika Westerberg (*regs)[1] = env->regs[14]; 1709edc5d79SMika Westerberg (*regs)[2] = env->regs[13]; 1719edc5d79SMika Westerberg (*regs)[3] = env->regs[12]; 1729edc5d79SMika Westerberg (*regs)[4] = env->regs[R_EBP]; 1739edc5d79SMika Westerberg (*regs)[5] = env->regs[R_EBX]; 1749edc5d79SMika Westerberg (*regs)[6] = env->regs[11]; 1759edc5d79SMika Westerberg (*regs)[7] = env->regs[10]; 1769edc5d79SMika Westerberg (*regs)[8] = env->regs[9]; 1779edc5d79SMika Westerberg (*regs)[9] = env->regs[8]; 1789edc5d79SMika Westerberg (*regs)[10] = env->regs[R_EAX]; 1799edc5d79SMika Westerberg (*regs)[11] = env->regs[R_ECX]; 1809edc5d79SMika Westerberg (*regs)[12] = env->regs[R_EDX]; 1819edc5d79SMika Westerberg (*regs)[13] = env->regs[R_ESI]; 1829edc5d79SMika Westerberg (*regs)[14] = env->regs[R_EDI]; 1839edc5d79SMika Westerberg (*regs)[15] = env->regs[R_EAX]; /* XXX */ 1849edc5d79SMika Westerberg (*regs)[16] = env->eip; 1859edc5d79SMika Westerberg (*regs)[17] = env->segs[R_CS].selector & 0xffff; 1869edc5d79SMika Westerberg (*regs)[18] = env->eflags; 1879edc5d79SMika Westerberg (*regs)[19] = env->regs[R_ESP]; 1889edc5d79SMika Westerberg (*regs)[20] = env->segs[R_SS].selector & 0xffff; 1899edc5d79SMika Westerberg (*regs)[21] = env->segs[R_FS].selector & 0xffff; 1909edc5d79SMika Westerberg (*regs)[22] = env->segs[R_GS].selector & 0xffff; 1919edc5d79SMika Westerberg (*regs)[23] = env->segs[R_DS].selector & 0xffff; 1929edc5d79SMika Westerberg (*regs)[24] = env->segs[R_ES].selector & 0xffff; 1939edc5d79SMika Westerberg (*regs)[25] = env->segs[R_FS].selector & 0xffff; 1949edc5d79SMika Westerberg (*regs)[26] = env->segs[R_GS].selector & 0xffff; 1959edc5d79SMika Westerberg } 1969edc5d79SMika Westerberg 19784409ddbSj_mayer #else 19884409ddbSj_mayer 19930ac07d4Sbellard #define ELF_START_MMAP 0x80000000 20030ac07d4Sbellard 20130ac07d4Sbellard /* 20230ac07d4Sbellard * This is used to ensure we don't load something for the wrong architecture. 20330ac07d4Sbellard */ 20430ac07d4Sbellard #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) ) 20530ac07d4Sbellard 20630ac07d4Sbellard /* 20730ac07d4Sbellard * These are used to set parameters in the core dumps. 20830ac07d4Sbellard */ 20930ac07d4Sbellard #define ELF_CLASS ELFCLASS32 21030ac07d4Sbellard #define ELF_ARCH EM_386 21130ac07d4Sbellard 212d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 213d97ef72eSRichard Henderson struct image_info *infop) 214e5fe0c52Spbrook { 215e5fe0c52Spbrook regs->esp = infop->start_stack; 216e5fe0c52Spbrook regs->eip = infop->entry; 217e5fe0c52Spbrook 21830ac07d4Sbellard /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program 21930ac07d4Sbellard starts %edx contains a pointer to a function which might be 22030ac07d4Sbellard registered using `atexit'. This provides a mean for the 22130ac07d4Sbellard dynamic linker to call DT_FINI functions for shared libraries 22230ac07d4Sbellard that have been loaded before the code runs. 22330ac07d4Sbellard 22430ac07d4Sbellard A value of 0 tells we have no such handler. */ 225e5fe0c52Spbrook regs->edx = 0; 226b346ff46Sbellard } 2279edc5d79SMika Westerberg 2289edc5d79SMika Westerberg #define ELF_NREG 17 229c227f099SAnthony Liguori typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 2309edc5d79SMika Westerberg 2319edc5d79SMika Westerberg /* 2329edc5d79SMika Westerberg * Note that ELF_NREG should be 19 as there should be place for 2339edc5d79SMika Westerberg * TRAPNO and ERR "registers" as well but linux doesn't dump 2349edc5d79SMika Westerberg * those. 2359edc5d79SMika Westerberg * 2369edc5d79SMika Westerberg * See linux kernel: arch/x86/include/asm/elf.h 2379edc5d79SMika Westerberg */ 23805390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) 2399edc5d79SMika Westerberg { 2409edc5d79SMika Westerberg (*regs)[0] = env->regs[R_EBX]; 2419edc5d79SMika Westerberg (*regs)[1] = env->regs[R_ECX]; 2429edc5d79SMika Westerberg (*regs)[2] = env->regs[R_EDX]; 2439edc5d79SMika Westerberg (*regs)[3] = env->regs[R_ESI]; 2449edc5d79SMika Westerberg (*regs)[4] = env->regs[R_EDI]; 2459edc5d79SMika Westerberg (*regs)[5] = env->regs[R_EBP]; 2469edc5d79SMika Westerberg (*regs)[6] = env->regs[R_EAX]; 2479edc5d79SMika Westerberg (*regs)[7] = env->segs[R_DS].selector & 0xffff; 2489edc5d79SMika Westerberg (*regs)[8] = env->segs[R_ES].selector & 0xffff; 2499edc5d79SMika Westerberg (*regs)[9] = env->segs[R_FS].selector & 0xffff; 2509edc5d79SMika Westerberg (*regs)[10] = env->segs[R_GS].selector & 0xffff; 2519edc5d79SMika Westerberg (*regs)[11] = env->regs[R_EAX]; /* XXX */ 2529edc5d79SMika Westerberg (*regs)[12] = env->eip; 2539edc5d79SMika Westerberg (*regs)[13] = env->segs[R_CS].selector & 0xffff; 2549edc5d79SMika Westerberg (*regs)[14] = env->eflags; 2559edc5d79SMika Westerberg (*regs)[15] = env->regs[R_ESP]; 2569edc5d79SMika Westerberg (*regs)[16] = env->segs[R_SS].selector & 0xffff; 2579edc5d79SMika Westerberg } 25884409ddbSj_mayer #endif 259b346ff46Sbellard 2609edc5d79SMika Westerberg #define USE_ELF_CORE_DUMP 261b346ff46Sbellard #define ELF_EXEC_PAGESIZE 4096 262b346ff46Sbellard 263b346ff46Sbellard #endif 264b346ff46Sbellard 265b346ff46Sbellard #ifdef TARGET_ARM 266b346ff46Sbellard 26724e76ff0SPeter Maydell #ifndef TARGET_AARCH64 26824e76ff0SPeter Maydell /* 32 bit ARM definitions */ 26924e76ff0SPeter Maydell 270b346ff46Sbellard #define ELF_START_MMAP 0x80000000 271b346ff46Sbellard 272b597c3f7SPeter Crosthwaite #define ELF_ARCH EM_ARM 273b346ff46Sbellard #define ELF_CLASS ELFCLASS32 274b346ff46Sbellard 275d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 276d97ef72eSRichard Henderson struct image_info *infop) 277b346ff46Sbellard { 278992f48a0Sblueswir1 abi_long stack = infop->start_stack; 279b346ff46Sbellard memset(regs, 0, sizeof(*regs)); 28099033caeSAlexander Graf 281167e4cdcSPeter Maydell regs->uregs[16] = ARM_CPU_MODE_USR; 282167e4cdcSPeter Maydell if (infop->entry & 1) { 283167e4cdcSPeter Maydell regs->uregs[16] |= CPSR_T; 284167e4cdcSPeter Maydell } 285167e4cdcSPeter Maydell regs->uregs[15] = infop->entry & 0xfffffffe; 286167e4cdcSPeter Maydell regs->uregs[13] = infop->start_stack; 2872f619698Sbellard /* FIXME - what to for failure of get_user()? */ 288167e4cdcSPeter Maydell get_user_ual(regs->uregs[2], stack + 8); /* envp */ 289167e4cdcSPeter Maydell get_user_ual(regs->uregs[1], stack + 4); /* envp */ 290a1516e92Sbellard /* XXX: it seems that r0 is zeroed after ! */ 291167e4cdcSPeter Maydell regs->uregs[0] = 0; 292e5fe0c52Spbrook /* For uClinux PIC binaries. */ 293863cf0b7Sj_mayer /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ 294167e4cdcSPeter Maydell regs->uregs[10] = infop->start_data; 2953cb10cfaSChristophe Lyon 2963cb10cfaSChristophe Lyon /* Support ARM FDPIC. */ 2973cb10cfaSChristophe Lyon if (info_is_fdpic(infop)) { 2983cb10cfaSChristophe Lyon /* As described in the ABI document, r7 points to the loadmap info 2993cb10cfaSChristophe Lyon * prepared by the kernel. If an interpreter is needed, r8 points 3003cb10cfaSChristophe Lyon * to the interpreter loadmap and r9 points to the interpreter 3013cb10cfaSChristophe Lyon * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and 3023cb10cfaSChristophe Lyon * r9 points to the main program PT_DYNAMIC info. 3033cb10cfaSChristophe Lyon */ 3043cb10cfaSChristophe Lyon regs->uregs[7] = infop->loadmap_addr; 3053cb10cfaSChristophe Lyon if (infop->interpreter_loadmap_addr) { 3063cb10cfaSChristophe Lyon /* Executable is dynamically loaded. */ 3073cb10cfaSChristophe Lyon regs->uregs[8] = infop->interpreter_loadmap_addr; 3083cb10cfaSChristophe Lyon regs->uregs[9] = infop->interpreter_pt_dynamic_addr; 3093cb10cfaSChristophe Lyon } else { 3103cb10cfaSChristophe Lyon regs->uregs[8] = 0; 3113cb10cfaSChristophe Lyon regs->uregs[9] = infop->pt_dynamic_addr; 3123cb10cfaSChristophe Lyon } 3133cb10cfaSChristophe Lyon } 314b346ff46Sbellard } 315b346ff46Sbellard 316edf8e2afSMika Westerberg #define ELF_NREG 18 317c227f099SAnthony Liguori typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 318edf8e2afSMika Westerberg 31905390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env) 320edf8e2afSMika Westerberg { 32186cd7b2dSPaolo Bonzini (*regs)[0] = tswapreg(env->regs[0]); 32286cd7b2dSPaolo Bonzini (*regs)[1] = tswapreg(env->regs[1]); 32386cd7b2dSPaolo Bonzini (*regs)[2] = tswapreg(env->regs[2]); 32486cd7b2dSPaolo Bonzini (*regs)[3] = tswapreg(env->regs[3]); 32586cd7b2dSPaolo Bonzini (*regs)[4] = tswapreg(env->regs[4]); 32686cd7b2dSPaolo Bonzini (*regs)[5] = tswapreg(env->regs[5]); 32786cd7b2dSPaolo Bonzini (*regs)[6] = tswapreg(env->regs[6]); 32886cd7b2dSPaolo Bonzini (*regs)[7] = tswapreg(env->regs[7]); 32986cd7b2dSPaolo Bonzini (*regs)[8] = tswapreg(env->regs[8]); 33086cd7b2dSPaolo Bonzini (*regs)[9] = tswapreg(env->regs[9]); 33186cd7b2dSPaolo Bonzini (*regs)[10] = tswapreg(env->regs[10]); 33286cd7b2dSPaolo Bonzini (*regs)[11] = tswapreg(env->regs[11]); 33386cd7b2dSPaolo Bonzini (*regs)[12] = tswapreg(env->regs[12]); 33486cd7b2dSPaolo Bonzini (*regs)[13] = tswapreg(env->regs[13]); 33586cd7b2dSPaolo Bonzini (*regs)[14] = tswapreg(env->regs[14]); 33686cd7b2dSPaolo Bonzini (*regs)[15] = tswapreg(env->regs[15]); 337edf8e2afSMika Westerberg 33886cd7b2dSPaolo Bonzini (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env)); 33986cd7b2dSPaolo Bonzini (*regs)[17] = tswapreg(env->regs[0]); /* XXX */ 340edf8e2afSMika Westerberg } 341edf8e2afSMika Westerberg 34230ac07d4Sbellard #define USE_ELF_CORE_DUMP 34330ac07d4Sbellard #define ELF_EXEC_PAGESIZE 4096 34430ac07d4Sbellard 345afce2927Sbellard enum 346afce2927Sbellard { 347afce2927Sbellard ARM_HWCAP_ARM_SWP = 1 << 0, 348afce2927Sbellard ARM_HWCAP_ARM_HALF = 1 << 1, 349afce2927Sbellard ARM_HWCAP_ARM_THUMB = 1 << 2, 350afce2927Sbellard ARM_HWCAP_ARM_26BIT = 1 << 3, 351afce2927Sbellard ARM_HWCAP_ARM_FAST_MULT = 1 << 4, 352afce2927Sbellard ARM_HWCAP_ARM_FPA = 1 << 5, 353afce2927Sbellard ARM_HWCAP_ARM_VFP = 1 << 6, 354afce2927Sbellard ARM_HWCAP_ARM_EDSP = 1 << 7, 355cf6de34aSRiku Voipio ARM_HWCAP_ARM_JAVA = 1 << 8, 356cf6de34aSRiku Voipio ARM_HWCAP_ARM_IWMMXT = 1 << 9, 35743ce393eSPeter Maydell ARM_HWCAP_ARM_CRUNCH = 1 << 10, 35843ce393eSPeter Maydell ARM_HWCAP_ARM_THUMBEE = 1 << 11, 35943ce393eSPeter Maydell ARM_HWCAP_ARM_NEON = 1 << 12, 36043ce393eSPeter Maydell ARM_HWCAP_ARM_VFPv3 = 1 << 13, 36143ce393eSPeter Maydell ARM_HWCAP_ARM_VFPv3D16 = 1 << 14, 36224682654SPeter Maydell ARM_HWCAP_ARM_TLS = 1 << 15, 36324682654SPeter Maydell ARM_HWCAP_ARM_VFPv4 = 1 << 16, 36424682654SPeter Maydell ARM_HWCAP_ARM_IDIVA = 1 << 17, 36524682654SPeter Maydell ARM_HWCAP_ARM_IDIVT = 1 << 18, 36624682654SPeter Maydell ARM_HWCAP_ARM_VFPD32 = 1 << 19, 36724682654SPeter Maydell ARM_HWCAP_ARM_LPAE = 1 << 20, 36824682654SPeter Maydell ARM_HWCAP_ARM_EVTSTRM = 1 << 21, 369afce2927Sbellard }; 370afce2927Sbellard 371ad6919dcSPeter Maydell enum { 372ad6919dcSPeter Maydell ARM_HWCAP2_ARM_AES = 1 << 0, 373ad6919dcSPeter Maydell ARM_HWCAP2_ARM_PMULL = 1 << 1, 374ad6919dcSPeter Maydell ARM_HWCAP2_ARM_SHA1 = 1 << 2, 375ad6919dcSPeter Maydell ARM_HWCAP2_ARM_SHA2 = 1 << 3, 376ad6919dcSPeter Maydell ARM_HWCAP2_ARM_CRC32 = 1 << 4, 377ad6919dcSPeter Maydell }; 378ad6919dcSPeter Maydell 3796b1275ffSPeter Maydell /* The commpage only exists for 32 bit kernels */ 3806b1275ffSPeter Maydell 381806d1021SMeador Inge /* Return 1 if the proposed guest space is suitable for the guest. 382806d1021SMeador Inge * Return 0 if the proposed guest space isn't suitable, but another 383806d1021SMeador Inge * address space should be tried. 384806d1021SMeador Inge * Return -1 if there is no way the proposed guest space can be 385806d1021SMeador Inge * valid regardless of the base. 386806d1021SMeador Inge * The guest code may leave a page mapped and populate it if the 387806d1021SMeador Inge * address is suitable. 388806d1021SMeador Inge */ 389c3637eafSLuke Shumaker static int init_guest_commpage(unsigned long guest_base, 390806d1021SMeador Inge unsigned long guest_size) 39197cc7560SDr. David Alan Gilbert { 39297cc7560SDr. David Alan Gilbert unsigned long real_start, test_page_addr; 39397cc7560SDr. David Alan Gilbert 39497cc7560SDr. David Alan Gilbert /* We need to check that we can force a fault on access to the 39597cc7560SDr. David Alan Gilbert * commpage at 0xffff0fxx 39697cc7560SDr. David Alan Gilbert */ 39797cc7560SDr. David Alan Gilbert test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask); 398806d1021SMeador Inge 399806d1021SMeador Inge /* If the commpage lies within the already allocated guest space, 400806d1021SMeador Inge * then there is no way we can allocate it. 401955e304fSLuke Shumaker * 402955e304fSLuke Shumaker * You may be thinking that that this check is redundant because 403955e304fSLuke Shumaker * we already validated the guest size against MAX_RESERVED_VA; 404955e304fSLuke Shumaker * but if qemu_host_page_mask is unusually large, then 405955e304fSLuke Shumaker * test_page_addr may be lower. 406806d1021SMeador Inge */ 407806d1021SMeador Inge if (test_page_addr >= guest_base 408e568f9dfSPeter Maydell && test_page_addr < (guest_base + guest_size)) { 409806d1021SMeador Inge return -1; 410806d1021SMeador Inge } 411806d1021SMeador Inge 41297cc7560SDr. David Alan Gilbert /* Note it needs to be writeable to let us initialise it */ 41397cc7560SDr. David Alan Gilbert real_start = (unsigned long) 41497cc7560SDr. David Alan Gilbert mmap((void *)test_page_addr, qemu_host_page_size, 41597cc7560SDr. David Alan Gilbert PROT_READ | PROT_WRITE, 41697cc7560SDr. David Alan Gilbert MAP_ANONYMOUS | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 41797cc7560SDr. David Alan Gilbert 41897cc7560SDr. David Alan Gilbert /* If we can't map it then try another address */ 41997cc7560SDr. David Alan Gilbert if (real_start == -1ul) { 42097cc7560SDr. David Alan Gilbert return 0; 42197cc7560SDr. David Alan Gilbert } 42297cc7560SDr. David Alan Gilbert 42397cc7560SDr. David Alan Gilbert if (real_start != test_page_addr) { 42497cc7560SDr. David Alan Gilbert /* OS didn't put the page where we asked - unmap and reject */ 42597cc7560SDr. David Alan Gilbert munmap((void *)real_start, qemu_host_page_size); 42697cc7560SDr. David Alan Gilbert return 0; 42797cc7560SDr. David Alan Gilbert } 42897cc7560SDr. David Alan Gilbert 42997cc7560SDr. David Alan Gilbert /* Leave the page mapped 43097cc7560SDr. David Alan Gilbert * Populate it (mmap should have left it all 0'd) 43197cc7560SDr. David Alan Gilbert */ 43297cc7560SDr. David Alan Gilbert 43397cc7560SDr. David Alan Gilbert /* Kernel helper versions */ 43497cc7560SDr. David Alan Gilbert __put_user(5, (uint32_t *)g2h(0xffff0ffcul)); 43597cc7560SDr. David Alan Gilbert 43697cc7560SDr. David Alan Gilbert /* Now it's populated make it RO */ 43797cc7560SDr. David Alan Gilbert if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) { 43897cc7560SDr. David Alan Gilbert perror("Protecting guest commpage"); 43997cc7560SDr. David Alan Gilbert exit(-1); 44097cc7560SDr. David Alan Gilbert } 44197cc7560SDr. David Alan Gilbert 44297cc7560SDr. David Alan Gilbert return 1; /* All good */ 44397cc7560SDr. David Alan Gilbert } 444adf050b1SBenoit Canet 445adf050b1SBenoit Canet #define ELF_HWCAP get_elf_hwcap() 446ad6919dcSPeter Maydell #define ELF_HWCAP2 get_elf_hwcap2() 447adf050b1SBenoit Canet 448adf050b1SBenoit Canet static uint32_t get_elf_hwcap(void) 449adf050b1SBenoit Canet { 450a2247f8eSAndreas Färber ARMCPU *cpu = ARM_CPU(thread_cpu); 451adf050b1SBenoit Canet uint32_t hwcaps = 0; 452adf050b1SBenoit Canet 453adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_SWP; 454adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_HALF; 455adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_THUMB; 456adf050b1SBenoit Canet hwcaps |= ARM_HWCAP_ARM_FAST_MULT; 457adf050b1SBenoit Canet 458adf050b1SBenoit Canet /* probe for the extra features */ 459adf050b1SBenoit Canet #define GET_FEATURE(feat, hwcap) \ 460a2247f8eSAndreas Färber do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) 461962fcbf2SRichard Henderson 462962fcbf2SRichard Henderson #define GET_FEATURE_ID(feat, hwcap) \ 463962fcbf2SRichard Henderson do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) 464962fcbf2SRichard Henderson 46524682654SPeter Maydell /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ 46624682654SPeter Maydell GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); 467adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP); 468adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); 469adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); 470adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); 471adf050b1SBenoit Canet GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3); 47224682654SPeter Maydell GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); 47324682654SPeter Maydell GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4); 4747e0cf8b4SRichard Henderson GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA); 4757e0cf8b4SRichard Henderson GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT); 47624682654SPeter Maydell /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c. 47724682654SPeter Maydell * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of 47824682654SPeter Maydell * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated 47924682654SPeter Maydell * to our VFP_FP16 feature bit. 48024682654SPeter Maydell */ 48124682654SPeter Maydell GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPD32); 48224682654SPeter Maydell GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE); 483adf050b1SBenoit Canet 484adf050b1SBenoit Canet return hwcaps; 485adf050b1SBenoit Canet } 486afce2927Sbellard 487ad6919dcSPeter Maydell static uint32_t get_elf_hwcap2(void) 488ad6919dcSPeter Maydell { 489ad6919dcSPeter Maydell ARMCPU *cpu = ARM_CPU(thread_cpu); 490ad6919dcSPeter Maydell uint32_t hwcaps = 0; 491ad6919dcSPeter Maydell 492962fcbf2SRichard Henderson GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); 493962fcbf2SRichard Henderson GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); 494962fcbf2SRichard Henderson GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); 495962fcbf2SRichard Henderson GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); 496962fcbf2SRichard Henderson GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); 497ad6919dcSPeter Maydell return hwcaps; 498ad6919dcSPeter Maydell } 499ad6919dcSPeter Maydell 500ad6919dcSPeter Maydell #undef GET_FEATURE 501962fcbf2SRichard Henderson #undef GET_FEATURE_ID 502ad6919dcSPeter Maydell 50324e76ff0SPeter Maydell #else 50424e76ff0SPeter Maydell /* 64 bit ARM definitions */ 50524e76ff0SPeter Maydell #define ELF_START_MMAP 0x80000000 50624e76ff0SPeter Maydell 507b597c3f7SPeter Crosthwaite #define ELF_ARCH EM_AARCH64 50824e76ff0SPeter Maydell #define ELF_CLASS ELFCLASS64 50924e76ff0SPeter Maydell #define ELF_PLATFORM "aarch64" 51024e76ff0SPeter Maydell 51124e76ff0SPeter Maydell static inline void init_thread(struct target_pt_regs *regs, 51224e76ff0SPeter Maydell struct image_info *infop) 51324e76ff0SPeter Maydell { 51424e76ff0SPeter Maydell abi_long stack = infop->start_stack; 51524e76ff0SPeter Maydell memset(regs, 0, sizeof(*regs)); 51624e76ff0SPeter Maydell 51724e76ff0SPeter Maydell regs->pc = infop->entry & ~0x3ULL; 51824e76ff0SPeter Maydell regs->sp = stack; 51924e76ff0SPeter Maydell } 52024e76ff0SPeter Maydell 52124e76ff0SPeter Maydell #define ELF_NREG 34 52224e76ff0SPeter Maydell typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 52324e76ff0SPeter Maydell 52424e76ff0SPeter Maydell static void elf_core_copy_regs(target_elf_gregset_t *regs, 52524e76ff0SPeter Maydell const CPUARMState *env) 52624e76ff0SPeter Maydell { 52724e76ff0SPeter Maydell int i; 52824e76ff0SPeter Maydell 52924e76ff0SPeter Maydell for (i = 0; i < 32; i++) { 53024e76ff0SPeter Maydell (*regs)[i] = tswapreg(env->xregs[i]); 53124e76ff0SPeter Maydell } 53224e76ff0SPeter Maydell (*regs)[32] = tswapreg(env->pc); 53324e76ff0SPeter Maydell (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env)); 53424e76ff0SPeter Maydell } 53524e76ff0SPeter Maydell 53624e76ff0SPeter Maydell #define USE_ELF_CORE_DUMP 53724e76ff0SPeter Maydell #define ELF_EXEC_PAGESIZE 4096 53824e76ff0SPeter Maydell 53924e76ff0SPeter Maydell enum { 54024e76ff0SPeter Maydell ARM_HWCAP_A64_FP = 1 << 0, 54124e76ff0SPeter Maydell ARM_HWCAP_A64_ASIMD = 1 << 1, 54224e76ff0SPeter Maydell ARM_HWCAP_A64_EVTSTRM = 1 << 2, 54324e76ff0SPeter Maydell ARM_HWCAP_A64_AES = 1 << 3, 54424e76ff0SPeter Maydell ARM_HWCAP_A64_PMULL = 1 << 4, 54524e76ff0SPeter Maydell ARM_HWCAP_A64_SHA1 = 1 << 5, 54624e76ff0SPeter Maydell ARM_HWCAP_A64_SHA2 = 1 << 6, 54724e76ff0SPeter Maydell ARM_HWCAP_A64_CRC32 = 1 << 7, 548955f56d4SArd Biesheuvel ARM_HWCAP_A64_ATOMICS = 1 << 8, 549955f56d4SArd Biesheuvel ARM_HWCAP_A64_FPHP = 1 << 9, 550955f56d4SArd Biesheuvel ARM_HWCAP_A64_ASIMDHP = 1 << 10, 551955f56d4SArd Biesheuvel ARM_HWCAP_A64_CPUID = 1 << 11, 552955f56d4SArd Biesheuvel ARM_HWCAP_A64_ASIMDRDM = 1 << 12, 553955f56d4SArd Biesheuvel ARM_HWCAP_A64_JSCVT = 1 << 13, 554955f56d4SArd Biesheuvel ARM_HWCAP_A64_FCMA = 1 << 14, 555955f56d4SArd Biesheuvel ARM_HWCAP_A64_LRCPC = 1 << 15, 556955f56d4SArd Biesheuvel ARM_HWCAP_A64_DCPOP = 1 << 16, 557955f56d4SArd Biesheuvel ARM_HWCAP_A64_SHA3 = 1 << 17, 558955f56d4SArd Biesheuvel ARM_HWCAP_A64_SM3 = 1 << 18, 559955f56d4SArd Biesheuvel ARM_HWCAP_A64_SM4 = 1 << 19, 560955f56d4SArd Biesheuvel ARM_HWCAP_A64_ASIMDDP = 1 << 20, 561955f56d4SArd Biesheuvel ARM_HWCAP_A64_SHA512 = 1 << 21, 562955f56d4SArd Biesheuvel ARM_HWCAP_A64_SVE = 1 << 22, 56324e76ff0SPeter Maydell }; 56424e76ff0SPeter Maydell 56524e76ff0SPeter Maydell #define ELF_HWCAP get_elf_hwcap() 56624e76ff0SPeter Maydell 56724e76ff0SPeter Maydell static uint32_t get_elf_hwcap(void) 56824e76ff0SPeter Maydell { 56924e76ff0SPeter Maydell ARMCPU *cpu = ARM_CPU(thread_cpu); 57024e76ff0SPeter Maydell uint32_t hwcaps = 0; 57124e76ff0SPeter Maydell 57224e76ff0SPeter Maydell hwcaps |= ARM_HWCAP_A64_FP; 57324e76ff0SPeter Maydell hwcaps |= ARM_HWCAP_A64_ASIMD; 57424e76ff0SPeter Maydell 57524e76ff0SPeter Maydell /* probe for the extra features */ 576962fcbf2SRichard Henderson #define GET_FEATURE_ID(feat, hwcap) \ 577962fcbf2SRichard Henderson do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) 578962fcbf2SRichard Henderson 579962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES); 580962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL); 581962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1); 582962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2); 583962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512); 584962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32); 585962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); 586962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); 587962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); 5885763190fSRichard Henderson GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); 589962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); 590962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); 591962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); 592962fcbf2SRichard Henderson GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); 593cd208a1cSRichard Henderson GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); 594962fcbf2SRichard Henderson 595962fcbf2SRichard Henderson #undef GET_FEATURE_ID 59624e76ff0SPeter Maydell 59724e76ff0SPeter Maydell return hwcaps; 59824e76ff0SPeter Maydell } 59924e76ff0SPeter Maydell 60024e76ff0SPeter Maydell #endif /* not TARGET_AARCH64 */ 60124e76ff0SPeter Maydell #endif /* TARGET_ARM */ 60230ac07d4Sbellard 603853d6f7aSbellard #ifdef TARGET_SPARC 604a315a145Sbellard #ifdef TARGET_SPARC64 605853d6f7aSbellard 606853d6f7aSbellard #define ELF_START_MMAP 0x80000000 607cf973e46SArtyom Tarasenko #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \ 608cf973e46SArtyom Tarasenko | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9) 609992f48a0Sblueswir1 #ifndef TARGET_ABI32 610cb33da57Sblueswir1 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS ) 611992f48a0Sblueswir1 #else 612992f48a0Sblueswir1 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC ) 613992f48a0Sblueswir1 #endif 614853d6f7aSbellard 615a315a145Sbellard #define ELF_CLASS ELFCLASS64 6165ef54116Sbellard #define ELF_ARCH EM_SPARCV9 6175ef54116Sbellard 6185ef54116Sbellard #define STACK_BIAS 2047 619a315a145Sbellard 620d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 621d97ef72eSRichard Henderson struct image_info *infop) 622a315a145Sbellard { 623992f48a0Sblueswir1 #ifndef TARGET_ABI32 624a315a145Sbellard regs->tstate = 0; 625992f48a0Sblueswir1 #endif 626a315a145Sbellard regs->pc = infop->entry; 627a315a145Sbellard regs->npc = regs->pc + 4; 628a315a145Sbellard regs->y = 0; 629992f48a0Sblueswir1 #ifdef TARGET_ABI32 630992f48a0Sblueswir1 regs->u_regs[14] = infop->start_stack - 16 * 4; 631992f48a0Sblueswir1 #else 632cb33da57Sblueswir1 if (personality(infop->personality) == PER_LINUX32) 633cb33da57Sblueswir1 regs->u_regs[14] = infop->start_stack - 16 * 4; 634cb33da57Sblueswir1 else 6355ef54116Sbellard regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS; 636992f48a0Sblueswir1 #endif 637a315a145Sbellard } 638a315a145Sbellard 639a315a145Sbellard #else 640a315a145Sbellard #define ELF_START_MMAP 0x80000000 641cf973e46SArtyom Tarasenko #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \ 642cf973e46SArtyom Tarasenko | HWCAP_SPARC_MULDIV) 643a315a145Sbellard 644853d6f7aSbellard #define ELF_CLASS ELFCLASS32 645853d6f7aSbellard #define ELF_ARCH EM_SPARC 646853d6f7aSbellard 647d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 648d97ef72eSRichard Henderson struct image_info *infop) 649853d6f7aSbellard { 650f5155289Sbellard regs->psr = 0; 651f5155289Sbellard regs->pc = infop->entry; 652f5155289Sbellard regs->npc = regs->pc + 4; 653f5155289Sbellard regs->y = 0; 654f5155289Sbellard regs->u_regs[14] = infop->start_stack - 16 * 4; 655853d6f7aSbellard } 656853d6f7aSbellard 657853d6f7aSbellard #endif 658a315a145Sbellard #endif 659853d6f7aSbellard 66067867308Sbellard #ifdef TARGET_PPC 66167867308Sbellard 6624ecd4d16SPeter Crosthwaite #define ELF_MACHINE PPC_ELF_MACHINE 66367867308Sbellard #define ELF_START_MMAP 0x80000000 66467867308Sbellard 665e85e7c6eSj_mayer #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) 66684409ddbSj_mayer 66784409ddbSj_mayer #define elf_check_arch(x) ( (x) == EM_PPC64 ) 66884409ddbSj_mayer 66984409ddbSj_mayer #define ELF_CLASS ELFCLASS64 67084409ddbSj_mayer 67184409ddbSj_mayer #else 67284409ddbSj_mayer 67367867308Sbellard #define ELF_CLASS ELFCLASS32 67484409ddbSj_mayer 67584409ddbSj_mayer #endif 67684409ddbSj_mayer 67767867308Sbellard #define ELF_ARCH EM_PPC 67867867308Sbellard 679df84e4f3SNathan Froyd /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP). 680df84e4f3SNathan Froyd See arch/powerpc/include/asm/cputable.h. */ 681df84e4f3SNathan Froyd enum { 6823efa9a67Smalc QEMU_PPC_FEATURE_32 = 0x80000000, 6833efa9a67Smalc QEMU_PPC_FEATURE_64 = 0x40000000, 6843efa9a67Smalc QEMU_PPC_FEATURE_601_INSTR = 0x20000000, 6853efa9a67Smalc QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000, 6863efa9a67Smalc QEMU_PPC_FEATURE_HAS_FPU = 0x08000000, 6873efa9a67Smalc QEMU_PPC_FEATURE_HAS_MMU = 0x04000000, 6883efa9a67Smalc QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000, 6893efa9a67Smalc QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000, 6903efa9a67Smalc QEMU_PPC_FEATURE_HAS_SPE = 0x00800000, 6913efa9a67Smalc QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000, 6923efa9a67Smalc QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000, 6933efa9a67Smalc QEMU_PPC_FEATURE_NO_TB = 0x00100000, 6943efa9a67Smalc QEMU_PPC_FEATURE_POWER4 = 0x00080000, 6953efa9a67Smalc QEMU_PPC_FEATURE_POWER5 = 0x00040000, 6963efa9a67Smalc QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000, 6973efa9a67Smalc QEMU_PPC_FEATURE_CELL = 0x00010000, 6983efa9a67Smalc QEMU_PPC_FEATURE_BOOKE = 0x00008000, 6993efa9a67Smalc QEMU_PPC_FEATURE_SMT = 0x00004000, 7003efa9a67Smalc QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000, 7013efa9a67Smalc QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000, 7023efa9a67Smalc QEMU_PPC_FEATURE_PA6T = 0x00000800, 7033efa9a67Smalc QEMU_PPC_FEATURE_HAS_DFP = 0x00000400, 7043efa9a67Smalc QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200, 7053efa9a67Smalc QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100, 7063efa9a67Smalc QEMU_PPC_FEATURE_HAS_VSX = 0x00000080, 7073efa9a67Smalc QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040, 708df84e4f3SNathan Froyd 7093efa9a67Smalc QEMU_PPC_FEATURE_TRUE_LE = 0x00000002, 7103efa9a67Smalc QEMU_PPC_FEATURE_PPC_LE = 0x00000001, 711a60438ddSTom Musta 712a60438ddSTom Musta /* Feature definitions in AT_HWCAP2. */ 713a60438ddSTom Musta QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */ 714a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */ 715a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */ 716a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */ 717a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */ 718a60438ddSTom Musta QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */ 719be0c46d4SSandipan Das QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */ 720df84e4f3SNathan Froyd }; 721df84e4f3SNathan Froyd 722df84e4f3SNathan Froyd #define ELF_HWCAP get_elf_hwcap() 723df84e4f3SNathan Froyd 724df84e4f3SNathan Froyd static uint32_t get_elf_hwcap(void) 725df84e4f3SNathan Froyd { 726a2247f8eSAndreas Färber PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); 727df84e4f3SNathan Froyd uint32_t features = 0; 728df84e4f3SNathan Froyd 729df84e4f3SNathan Froyd /* We don't have to be terribly complete here; the high points are 730df84e4f3SNathan Froyd Altivec/FP/SPE support. Anything else is just a bonus. */ 731df84e4f3SNathan Froyd #define GET_FEATURE(flag, feature) \ 732a2247f8eSAndreas Färber do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) 73358eb5308SMichael Walle #define GET_FEATURE2(flags, feature) \ 73458eb5308SMichael Walle do { \ 73558eb5308SMichael Walle if ((cpu->env.insns_flags2 & flags) == flags) { \ 73658eb5308SMichael Walle features |= feature; \ 73758eb5308SMichael Walle } \ 73858eb5308SMichael Walle } while (0) 7393efa9a67Smalc GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64); 7403efa9a67Smalc GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU); 7413efa9a67Smalc GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC); 7423efa9a67Smalc GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE); 7433efa9a67Smalc GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE); 7443efa9a67Smalc GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE); 7453efa9a67Smalc GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE); 7463efa9a67Smalc GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC); 7470e019746STom Musta GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP); 7480e019746STom Musta GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX); 7490e019746STom Musta GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | 7500e019746STom Musta PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206), 7510e019746STom Musta QEMU_PPC_FEATURE_ARCH_2_06); 752df84e4f3SNathan Froyd #undef GET_FEATURE 7530e019746STom Musta #undef GET_FEATURE2 754df84e4f3SNathan Froyd 755df84e4f3SNathan Froyd return features; 756df84e4f3SNathan Froyd } 757df84e4f3SNathan Froyd 758a60438ddSTom Musta #define ELF_HWCAP2 get_elf_hwcap2() 759a60438ddSTom Musta 760a60438ddSTom Musta static uint32_t get_elf_hwcap2(void) 761a60438ddSTom Musta { 762a60438ddSTom Musta PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); 763a60438ddSTom Musta uint32_t features = 0; 764a60438ddSTom Musta 765a60438ddSTom Musta #define GET_FEATURE(flag, feature) \ 766a60438ddSTom Musta do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) 767a60438ddSTom Musta #define GET_FEATURE2(flag, feature) \ 768a60438ddSTom Musta do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0) 769a60438ddSTom Musta 770a60438ddSTom Musta GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL); 771a60438ddSTom Musta GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR); 772a60438ddSTom Musta GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | 773a60438ddSTom Musta PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07); 774be0c46d4SSandipan Das GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00); 775a60438ddSTom Musta 776a60438ddSTom Musta #undef GET_FEATURE 777a60438ddSTom Musta #undef GET_FEATURE2 778a60438ddSTom Musta 779a60438ddSTom Musta return features; 780a60438ddSTom Musta } 781a60438ddSTom Musta 782f5155289Sbellard /* 783f5155289Sbellard * The requirements here are: 784f5155289Sbellard * - keep the final alignment of sp (sp & 0xf) 785f5155289Sbellard * - make sure the 32-bit value at the first 16 byte aligned position of 786f5155289Sbellard * AUXV is greater than 16 for glibc compatibility. 787f5155289Sbellard * AT_IGNOREPPC is used for that. 788f5155289Sbellard * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, 789f5155289Sbellard * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. 790f5155289Sbellard */ 7910bccf03dSbellard #define DLINFO_ARCH_ITEMS 5 792f5155289Sbellard #define ARCH_DLINFO \ 793f5155289Sbellard do { \ 794623e250aSTom Musta PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); \ 795f5155289Sbellard /* \ 79682991bedSPeter Maydell * Handle glibc compatibility: these magic entries must \ 79782991bedSPeter Maydell * be at the lowest addresses in the final auxv. \ 798f5155289Sbellard */ \ 7990bccf03dSbellard NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 8000bccf03dSbellard NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 80182991bedSPeter Maydell NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \ 80282991bedSPeter Maydell NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \ 80382991bedSPeter Maydell NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ 804f5155289Sbellard } while (0) 805f5155289Sbellard 80667867308Sbellard static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) 80767867308Sbellard { 80867867308Sbellard _regs->gpr[1] = infop->start_stack; 809e85e7c6eSj_mayer #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) 810d90b94cdSDoug Kwan if (get_ppc64_abi(infop) < 2) { 8112ccf97ecSPeter Maydell uint64_t val; 8122ccf97ecSPeter Maydell get_user_u64(val, infop->entry + 8); 8132ccf97ecSPeter Maydell _regs->gpr[2] = val + infop->load_bias; 8142ccf97ecSPeter Maydell get_user_u64(val, infop->entry); 8152ccf97ecSPeter Maydell infop->entry = val + infop->load_bias; 816d90b94cdSDoug Kwan } else { 817d90b94cdSDoug Kwan _regs->gpr[12] = infop->entry; /* r12 set to global entry address */ 818d90b94cdSDoug Kwan } 81984409ddbSj_mayer #endif 82067867308Sbellard _regs->nip = infop->entry; 82167867308Sbellard } 82267867308Sbellard 823e2f3e741SNathan Froyd /* See linux kernel: arch/powerpc/include/asm/elf.h. */ 824e2f3e741SNathan Froyd #define ELF_NREG 48 825e2f3e741SNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 826e2f3e741SNathan Froyd 82705390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env) 828e2f3e741SNathan Froyd { 829e2f3e741SNathan Froyd int i; 830e2f3e741SNathan Froyd target_ulong ccr = 0; 831e2f3e741SNathan Froyd 832e2f3e741SNathan Froyd for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 83386cd7b2dSPaolo Bonzini (*regs)[i] = tswapreg(env->gpr[i]); 834e2f3e741SNathan Froyd } 835e2f3e741SNathan Froyd 83686cd7b2dSPaolo Bonzini (*regs)[32] = tswapreg(env->nip); 83786cd7b2dSPaolo Bonzini (*regs)[33] = tswapreg(env->msr); 83886cd7b2dSPaolo Bonzini (*regs)[35] = tswapreg(env->ctr); 83986cd7b2dSPaolo Bonzini (*regs)[36] = tswapreg(env->lr); 84086cd7b2dSPaolo Bonzini (*regs)[37] = tswapreg(env->xer); 841e2f3e741SNathan Froyd 842e2f3e741SNathan Froyd for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 843e2f3e741SNathan Froyd ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 844e2f3e741SNathan Froyd } 84586cd7b2dSPaolo Bonzini (*regs)[38] = tswapreg(ccr); 846e2f3e741SNathan Froyd } 847e2f3e741SNathan Froyd 848e2f3e741SNathan Froyd #define USE_ELF_CORE_DUMP 84967867308Sbellard #define ELF_EXEC_PAGESIZE 4096 85067867308Sbellard 85167867308Sbellard #endif 85267867308Sbellard 853048f6b4dSbellard #ifdef TARGET_MIPS 854048f6b4dSbellard 855048f6b4dSbellard #define ELF_START_MMAP 0x80000000 856048f6b4dSbellard 857388bb21aSths #ifdef TARGET_MIPS64 858388bb21aSths #define ELF_CLASS ELFCLASS64 859388bb21aSths #else 860048f6b4dSbellard #define ELF_CLASS ELFCLASS32 861388bb21aSths #endif 862048f6b4dSbellard #define ELF_ARCH EM_MIPS 863048f6b4dSbellard 864f72541f3SAleksandar Markovic #define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS) 865f72541f3SAleksandar Markovic 866d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 867d97ef72eSRichard Henderson struct image_info *infop) 868048f6b4dSbellard { 869623a930eSths regs->cp0_status = 2 << CP0St_KSU; 870048f6b4dSbellard regs->cp0_epc = infop->entry; 871048f6b4dSbellard regs->regs[29] = infop->start_stack; 872048f6b4dSbellard } 873048f6b4dSbellard 87451e52606SNathan Froyd /* See linux kernel: arch/mips/include/asm/elf.h. */ 87551e52606SNathan Froyd #define ELF_NREG 45 87651e52606SNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 87751e52606SNathan Froyd 87851e52606SNathan Froyd /* See linux kernel: arch/mips/include/asm/reg.h. */ 87951e52606SNathan Froyd enum { 88051e52606SNathan Froyd #ifdef TARGET_MIPS64 88151e52606SNathan Froyd TARGET_EF_R0 = 0, 88251e52606SNathan Froyd #else 88351e52606SNathan Froyd TARGET_EF_R0 = 6, 88451e52606SNathan Froyd #endif 88551e52606SNathan Froyd TARGET_EF_R26 = TARGET_EF_R0 + 26, 88651e52606SNathan Froyd TARGET_EF_R27 = TARGET_EF_R0 + 27, 88751e52606SNathan Froyd TARGET_EF_LO = TARGET_EF_R0 + 32, 88851e52606SNathan Froyd TARGET_EF_HI = TARGET_EF_R0 + 33, 88951e52606SNathan Froyd TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34, 89051e52606SNathan Froyd TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35, 89151e52606SNathan Froyd TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36, 89251e52606SNathan Froyd TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37 89351e52606SNathan Froyd }; 89451e52606SNathan Froyd 89551e52606SNathan Froyd /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 89605390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env) 89751e52606SNathan Froyd { 89851e52606SNathan Froyd int i; 89951e52606SNathan Froyd 90051e52606SNathan Froyd for (i = 0; i < TARGET_EF_R0; i++) { 90151e52606SNathan Froyd (*regs)[i] = 0; 90251e52606SNathan Froyd } 90351e52606SNathan Froyd (*regs)[TARGET_EF_R0] = 0; 90451e52606SNathan Froyd 90551e52606SNathan Froyd for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { 906a29f998dSPaolo Bonzini (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]); 90751e52606SNathan Froyd } 90851e52606SNathan Froyd 90951e52606SNathan Froyd (*regs)[TARGET_EF_R26] = 0; 91051e52606SNathan Froyd (*regs)[TARGET_EF_R27] = 0; 911a29f998dSPaolo Bonzini (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]); 912a29f998dSPaolo Bonzini (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]); 913a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC); 914a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr); 915a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status); 916a29f998dSPaolo Bonzini (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause); 91751e52606SNathan Froyd } 91851e52606SNathan Froyd 91951e52606SNathan Froyd #define USE_ELF_CORE_DUMP 920388bb21aSths #define ELF_EXEC_PAGESIZE 4096 921388bb21aSths 92246a1ee4fSJames Cowgill /* See arch/mips/include/uapi/asm/hwcap.h. */ 92346a1ee4fSJames Cowgill enum { 92446a1ee4fSJames Cowgill HWCAP_MIPS_R6 = (1 << 0), 92546a1ee4fSJames Cowgill HWCAP_MIPS_MSA = (1 << 1), 92646a1ee4fSJames Cowgill }; 92746a1ee4fSJames Cowgill 92846a1ee4fSJames Cowgill #define ELF_HWCAP get_elf_hwcap() 92946a1ee4fSJames Cowgill 93046a1ee4fSJames Cowgill static uint32_t get_elf_hwcap(void) 93146a1ee4fSJames Cowgill { 93246a1ee4fSJames Cowgill MIPSCPU *cpu = MIPS_CPU(thread_cpu); 93346a1ee4fSJames Cowgill uint32_t hwcaps = 0; 93446a1ee4fSJames Cowgill 93546a1ee4fSJames Cowgill #define GET_FEATURE(flag, hwcap) \ 93646a1ee4fSJames Cowgill do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0) 93746a1ee4fSJames Cowgill 93846a1ee4fSJames Cowgill GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6); 93946a1ee4fSJames Cowgill GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA); 94046a1ee4fSJames Cowgill 94146a1ee4fSJames Cowgill #undef GET_FEATURE 94246a1ee4fSJames Cowgill 94346a1ee4fSJames Cowgill return hwcaps; 94446a1ee4fSJames Cowgill } 94546a1ee4fSJames Cowgill 946048f6b4dSbellard #endif /* TARGET_MIPS */ 947048f6b4dSbellard 948b779e29eSEdgar E. Iglesias #ifdef TARGET_MICROBLAZE 949b779e29eSEdgar E. Iglesias 950b779e29eSEdgar E. Iglesias #define ELF_START_MMAP 0x80000000 951b779e29eSEdgar E. Iglesias 9520d5d4699SEdgar E. Iglesias #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD) 953b779e29eSEdgar E. Iglesias 954b779e29eSEdgar E. Iglesias #define ELF_CLASS ELFCLASS32 9550d5d4699SEdgar E. Iglesias #define ELF_ARCH EM_MICROBLAZE 956b779e29eSEdgar E. Iglesias 957d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 958d97ef72eSRichard Henderson struct image_info *infop) 959b779e29eSEdgar E. Iglesias { 960b779e29eSEdgar E. Iglesias regs->pc = infop->entry; 961b779e29eSEdgar E. Iglesias regs->r1 = infop->start_stack; 962b779e29eSEdgar E. Iglesias 963b779e29eSEdgar E. Iglesias } 964b779e29eSEdgar E. Iglesias 965b779e29eSEdgar E. Iglesias #define ELF_EXEC_PAGESIZE 4096 966b779e29eSEdgar E. Iglesias 967e4cbd44dSEdgar E. Iglesias #define USE_ELF_CORE_DUMP 968e4cbd44dSEdgar E. Iglesias #define ELF_NREG 38 969e4cbd44dSEdgar E. Iglesias typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 970e4cbd44dSEdgar E. Iglesias 971e4cbd44dSEdgar E. Iglesias /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 97205390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env) 973e4cbd44dSEdgar E. Iglesias { 974e4cbd44dSEdgar E. Iglesias int i, pos = 0; 975e4cbd44dSEdgar E. Iglesias 976e4cbd44dSEdgar E. Iglesias for (i = 0; i < 32; i++) { 97786cd7b2dSPaolo Bonzini (*regs)[pos++] = tswapreg(env->regs[i]); 978e4cbd44dSEdgar E. Iglesias } 979e4cbd44dSEdgar E. Iglesias 980e4cbd44dSEdgar E. Iglesias for (i = 0; i < 6; i++) { 98186cd7b2dSPaolo Bonzini (*regs)[pos++] = tswapreg(env->sregs[i]); 982e4cbd44dSEdgar E. Iglesias } 983e4cbd44dSEdgar E. Iglesias } 984e4cbd44dSEdgar E. Iglesias 985b779e29eSEdgar E. Iglesias #endif /* TARGET_MICROBLAZE */ 986b779e29eSEdgar E. Iglesias 987a0a839b6SMarek Vasut #ifdef TARGET_NIOS2 988a0a839b6SMarek Vasut 989a0a839b6SMarek Vasut #define ELF_START_MMAP 0x80000000 990a0a839b6SMarek Vasut 991a0a839b6SMarek Vasut #define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2) 992a0a839b6SMarek Vasut 993a0a839b6SMarek Vasut #define ELF_CLASS ELFCLASS32 994a0a839b6SMarek Vasut #define ELF_ARCH EM_ALTERA_NIOS2 995a0a839b6SMarek Vasut 996a0a839b6SMarek Vasut static void init_thread(struct target_pt_regs *regs, struct image_info *infop) 997a0a839b6SMarek Vasut { 998a0a839b6SMarek Vasut regs->ea = infop->entry; 999a0a839b6SMarek Vasut regs->sp = infop->start_stack; 1000a0a839b6SMarek Vasut regs->estatus = 0x3; 1001a0a839b6SMarek Vasut } 1002a0a839b6SMarek Vasut 1003a0a839b6SMarek Vasut #define ELF_EXEC_PAGESIZE 4096 1004a0a839b6SMarek Vasut 1005a0a839b6SMarek Vasut #define USE_ELF_CORE_DUMP 1006a0a839b6SMarek Vasut #define ELF_NREG 49 1007a0a839b6SMarek Vasut typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1008a0a839b6SMarek Vasut 1009a0a839b6SMarek Vasut /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 1010a0a839b6SMarek Vasut static void elf_core_copy_regs(target_elf_gregset_t *regs, 1011a0a839b6SMarek Vasut const CPUNios2State *env) 1012a0a839b6SMarek Vasut { 1013a0a839b6SMarek Vasut int i; 1014a0a839b6SMarek Vasut 1015a0a839b6SMarek Vasut (*regs)[0] = -1; 1016a0a839b6SMarek Vasut for (i = 1; i < 8; i++) /* r0-r7 */ 1017a0a839b6SMarek Vasut (*regs)[i] = tswapreg(env->regs[i + 7]); 1018a0a839b6SMarek Vasut 1019a0a839b6SMarek Vasut for (i = 8; i < 16; i++) /* r8-r15 */ 1020a0a839b6SMarek Vasut (*regs)[i] = tswapreg(env->regs[i - 8]); 1021a0a839b6SMarek Vasut 1022a0a839b6SMarek Vasut for (i = 16; i < 24; i++) /* r16-r23 */ 1023a0a839b6SMarek Vasut (*regs)[i] = tswapreg(env->regs[i + 7]); 1024a0a839b6SMarek Vasut (*regs)[24] = -1; /* R_ET */ 1025a0a839b6SMarek Vasut (*regs)[25] = -1; /* R_BT */ 1026a0a839b6SMarek Vasut (*regs)[26] = tswapreg(env->regs[R_GP]); 1027a0a839b6SMarek Vasut (*regs)[27] = tswapreg(env->regs[R_SP]); 1028a0a839b6SMarek Vasut (*regs)[28] = tswapreg(env->regs[R_FP]); 1029a0a839b6SMarek Vasut (*regs)[29] = tswapreg(env->regs[R_EA]); 1030a0a839b6SMarek Vasut (*regs)[30] = -1; /* R_SSTATUS */ 1031a0a839b6SMarek Vasut (*regs)[31] = tswapreg(env->regs[R_RA]); 1032a0a839b6SMarek Vasut 1033a0a839b6SMarek Vasut (*regs)[32] = tswapreg(env->regs[R_PC]); 1034a0a839b6SMarek Vasut 1035a0a839b6SMarek Vasut (*regs)[33] = -1; /* R_STATUS */ 1036a0a839b6SMarek Vasut (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]); 1037a0a839b6SMarek Vasut 1038a0a839b6SMarek Vasut for (i = 35; i < 49; i++) /* ... */ 1039a0a839b6SMarek Vasut (*regs)[i] = -1; 1040a0a839b6SMarek Vasut } 1041a0a839b6SMarek Vasut 1042a0a839b6SMarek Vasut #endif /* TARGET_NIOS2 */ 1043a0a839b6SMarek Vasut 1044d962783eSJia Liu #ifdef TARGET_OPENRISC 1045d962783eSJia Liu 1046d962783eSJia Liu #define ELF_START_MMAP 0x08000000 1047d962783eSJia Liu 1048d962783eSJia Liu #define ELF_ARCH EM_OPENRISC 1049d962783eSJia Liu #define ELF_CLASS ELFCLASS32 1050d962783eSJia Liu #define ELF_DATA ELFDATA2MSB 1051d962783eSJia Liu 1052d962783eSJia Liu static inline void init_thread(struct target_pt_regs *regs, 1053d962783eSJia Liu struct image_info *infop) 1054d962783eSJia Liu { 1055d962783eSJia Liu regs->pc = infop->entry; 1056d962783eSJia Liu regs->gpr[1] = infop->start_stack; 1057d962783eSJia Liu } 1058d962783eSJia Liu 1059d962783eSJia Liu #define USE_ELF_CORE_DUMP 1060d962783eSJia Liu #define ELF_EXEC_PAGESIZE 8192 1061d962783eSJia Liu 1062d962783eSJia Liu /* See linux kernel arch/openrisc/include/asm/elf.h. */ 1063d962783eSJia Liu #define ELF_NREG 34 /* gprs and pc, sr */ 1064d962783eSJia Liu typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1065d962783eSJia Liu 1066d962783eSJia Liu static void elf_core_copy_regs(target_elf_gregset_t *regs, 1067d962783eSJia Liu const CPUOpenRISCState *env) 1068d962783eSJia Liu { 1069d962783eSJia Liu int i; 1070d962783eSJia Liu 1071d962783eSJia Liu for (i = 0; i < 32; i++) { 1072d89e71e8SStafford Horne (*regs)[i] = tswapreg(cpu_get_gpr(env, i)); 1073d962783eSJia Liu } 107486cd7b2dSPaolo Bonzini (*regs)[32] = tswapreg(env->pc); 107584775c43SRichard Henderson (*regs)[33] = tswapreg(cpu_get_sr(env)); 1076d962783eSJia Liu } 1077d962783eSJia Liu #define ELF_HWCAP 0 1078d962783eSJia Liu #define ELF_PLATFORM NULL 1079d962783eSJia Liu 1080d962783eSJia Liu #endif /* TARGET_OPENRISC */ 1081d962783eSJia Liu 1082fdf9b3e8Sbellard #ifdef TARGET_SH4 1083fdf9b3e8Sbellard 1084fdf9b3e8Sbellard #define ELF_START_MMAP 0x80000000 1085fdf9b3e8Sbellard 1086fdf9b3e8Sbellard #define ELF_CLASS ELFCLASS32 1087fdf9b3e8Sbellard #define ELF_ARCH EM_SH 1088fdf9b3e8Sbellard 1089d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1090d97ef72eSRichard Henderson struct image_info *infop) 1091fdf9b3e8Sbellard { 1092fdf9b3e8Sbellard /* Check other registers XXXXX */ 1093fdf9b3e8Sbellard regs->pc = infop->entry; 1094072ae847Sths regs->regs[15] = infop->start_stack; 1095fdf9b3e8Sbellard } 1096fdf9b3e8Sbellard 10977631c97eSNathan Froyd /* See linux kernel: arch/sh/include/asm/elf.h. */ 10987631c97eSNathan Froyd #define ELF_NREG 23 10997631c97eSNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 11007631c97eSNathan Froyd 11017631c97eSNathan Froyd /* See linux kernel: arch/sh/include/asm/ptrace.h. */ 11027631c97eSNathan Froyd enum { 11037631c97eSNathan Froyd TARGET_REG_PC = 16, 11047631c97eSNathan Froyd TARGET_REG_PR = 17, 11057631c97eSNathan Froyd TARGET_REG_SR = 18, 11067631c97eSNathan Froyd TARGET_REG_GBR = 19, 11077631c97eSNathan Froyd TARGET_REG_MACH = 20, 11087631c97eSNathan Froyd TARGET_REG_MACL = 21, 11097631c97eSNathan Froyd TARGET_REG_SYSCALL = 22 11107631c97eSNathan Froyd }; 11117631c97eSNathan Froyd 1112d97ef72eSRichard Henderson static inline void elf_core_copy_regs(target_elf_gregset_t *regs, 111305390248SAndreas Färber const CPUSH4State *env) 11147631c97eSNathan Froyd { 11157631c97eSNathan Froyd int i; 11167631c97eSNathan Froyd 11177631c97eSNathan Froyd for (i = 0; i < 16; i++) { 111872cd500bSPhilippe Mathieu-Daudé (*regs)[i] = tswapreg(env->gregs[i]); 11197631c97eSNathan Froyd } 11207631c97eSNathan Froyd 112186cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 112286cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_PR] = tswapreg(env->pr); 112386cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_SR] = tswapreg(env->sr); 112486cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr); 112586cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_MACH] = tswapreg(env->mach); 112686cd7b2dSPaolo Bonzini (*regs)[TARGET_REG_MACL] = tswapreg(env->macl); 11277631c97eSNathan Froyd (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */ 11287631c97eSNathan Froyd } 11297631c97eSNathan Froyd 11307631c97eSNathan Froyd #define USE_ELF_CORE_DUMP 1131fdf9b3e8Sbellard #define ELF_EXEC_PAGESIZE 4096 1132fdf9b3e8Sbellard 1133e42fd944SRichard Henderson enum { 1134e42fd944SRichard Henderson SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */ 1135e42fd944SRichard Henderson SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */ 1136e42fd944SRichard Henderson SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */ 1137e42fd944SRichard Henderson SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */ 1138e42fd944SRichard Henderson SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */ 1139e42fd944SRichard Henderson SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */ 1140e42fd944SRichard Henderson SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */ 1141e42fd944SRichard Henderson SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */ 1142e42fd944SRichard Henderson SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */ 1143e42fd944SRichard Henderson SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */ 1144e42fd944SRichard Henderson }; 1145e42fd944SRichard Henderson 1146e42fd944SRichard Henderson #define ELF_HWCAP get_elf_hwcap() 1147e42fd944SRichard Henderson 1148e42fd944SRichard Henderson static uint32_t get_elf_hwcap(void) 1149e42fd944SRichard Henderson { 1150e42fd944SRichard Henderson SuperHCPU *cpu = SUPERH_CPU(thread_cpu); 1151e42fd944SRichard Henderson uint32_t hwcap = 0; 1152e42fd944SRichard Henderson 1153e42fd944SRichard Henderson hwcap |= SH_CPU_HAS_FPU; 1154e42fd944SRichard Henderson 1155e42fd944SRichard Henderson if (cpu->env.features & SH_FEATURE_SH4A) { 1156e42fd944SRichard Henderson hwcap |= SH_CPU_HAS_LLSC; 1157e42fd944SRichard Henderson } 1158e42fd944SRichard Henderson 1159e42fd944SRichard Henderson return hwcap; 1160e42fd944SRichard Henderson } 1161e42fd944SRichard Henderson 1162fdf9b3e8Sbellard #endif 1163fdf9b3e8Sbellard 116448733d19Sths #ifdef TARGET_CRIS 116548733d19Sths 116648733d19Sths #define ELF_START_MMAP 0x80000000 116748733d19Sths 116848733d19Sths #define ELF_CLASS ELFCLASS32 116948733d19Sths #define ELF_ARCH EM_CRIS 117048733d19Sths 1171d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1172d97ef72eSRichard Henderson struct image_info *infop) 117348733d19Sths { 117448733d19Sths regs->erp = infop->entry; 117548733d19Sths } 117648733d19Sths 117748733d19Sths #define ELF_EXEC_PAGESIZE 8192 117848733d19Sths 117948733d19Sths #endif 118048733d19Sths 1181e6e5906bSpbrook #ifdef TARGET_M68K 1182e6e5906bSpbrook 1183e6e5906bSpbrook #define ELF_START_MMAP 0x80000000 1184e6e5906bSpbrook 1185e6e5906bSpbrook #define ELF_CLASS ELFCLASS32 1186e6e5906bSpbrook #define ELF_ARCH EM_68K 1187e6e5906bSpbrook 1188e6e5906bSpbrook /* ??? Does this need to do anything? 1189e6e5906bSpbrook #define ELF_PLAT_INIT(_r) */ 1190e6e5906bSpbrook 1191d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1192d97ef72eSRichard Henderson struct image_info *infop) 1193e6e5906bSpbrook { 1194e6e5906bSpbrook regs->usp = infop->start_stack; 1195e6e5906bSpbrook regs->sr = 0; 1196e6e5906bSpbrook regs->pc = infop->entry; 1197e6e5906bSpbrook } 1198e6e5906bSpbrook 11997a93cc55SNathan Froyd /* See linux kernel: arch/m68k/include/asm/elf.h. */ 12007a93cc55SNathan Froyd #define ELF_NREG 20 12017a93cc55SNathan Froyd typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 12027a93cc55SNathan Froyd 120305390248SAndreas Färber static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env) 12047a93cc55SNathan Froyd { 120586cd7b2dSPaolo Bonzini (*regs)[0] = tswapreg(env->dregs[1]); 120686cd7b2dSPaolo Bonzini (*regs)[1] = tswapreg(env->dregs[2]); 120786cd7b2dSPaolo Bonzini (*regs)[2] = tswapreg(env->dregs[3]); 120886cd7b2dSPaolo Bonzini (*regs)[3] = tswapreg(env->dregs[4]); 120986cd7b2dSPaolo Bonzini (*regs)[4] = tswapreg(env->dregs[5]); 121086cd7b2dSPaolo Bonzini (*regs)[5] = tswapreg(env->dregs[6]); 121186cd7b2dSPaolo Bonzini (*regs)[6] = tswapreg(env->dregs[7]); 121286cd7b2dSPaolo Bonzini (*regs)[7] = tswapreg(env->aregs[0]); 121386cd7b2dSPaolo Bonzini (*regs)[8] = tswapreg(env->aregs[1]); 121486cd7b2dSPaolo Bonzini (*regs)[9] = tswapreg(env->aregs[2]); 121586cd7b2dSPaolo Bonzini (*regs)[10] = tswapreg(env->aregs[3]); 121686cd7b2dSPaolo Bonzini (*regs)[11] = tswapreg(env->aregs[4]); 121786cd7b2dSPaolo Bonzini (*regs)[12] = tswapreg(env->aregs[5]); 121886cd7b2dSPaolo Bonzini (*regs)[13] = tswapreg(env->aregs[6]); 121986cd7b2dSPaolo Bonzini (*regs)[14] = tswapreg(env->dregs[0]); 122086cd7b2dSPaolo Bonzini (*regs)[15] = tswapreg(env->aregs[7]); 122186cd7b2dSPaolo Bonzini (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */ 122286cd7b2dSPaolo Bonzini (*regs)[17] = tswapreg(env->sr); 122386cd7b2dSPaolo Bonzini (*regs)[18] = tswapreg(env->pc); 12247a93cc55SNathan Froyd (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */ 12257a93cc55SNathan Froyd } 12267a93cc55SNathan Froyd 12277a93cc55SNathan Froyd #define USE_ELF_CORE_DUMP 1228e6e5906bSpbrook #define ELF_EXEC_PAGESIZE 8192 1229e6e5906bSpbrook 1230e6e5906bSpbrook #endif 1231e6e5906bSpbrook 12327a3148a9Sj_mayer #ifdef TARGET_ALPHA 12337a3148a9Sj_mayer 12347a3148a9Sj_mayer #define ELF_START_MMAP (0x30000000000ULL) 12357a3148a9Sj_mayer 12367a3148a9Sj_mayer #define ELF_CLASS ELFCLASS64 12377a3148a9Sj_mayer #define ELF_ARCH EM_ALPHA 12387a3148a9Sj_mayer 1239d97ef72eSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 1240d97ef72eSRichard Henderson struct image_info *infop) 12417a3148a9Sj_mayer { 12427a3148a9Sj_mayer regs->pc = infop->entry; 12437a3148a9Sj_mayer regs->ps = 8; 12447a3148a9Sj_mayer regs->usp = infop->start_stack; 12457a3148a9Sj_mayer } 12467a3148a9Sj_mayer 12477a3148a9Sj_mayer #define ELF_EXEC_PAGESIZE 8192 12487a3148a9Sj_mayer 12497a3148a9Sj_mayer #endif /* TARGET_ALPHA */ 12507a3148a9Sj_mayer 1251a4c075f1SUlrich Hecht #ifdef TARGET_S390X 1252a4c075f1SUlrich Hecht 1253a4c075f1SUlrich Hecht #define ELF_START_MMAP (0x20000000000ULL) 1254a4c075f1SUlrich Hecht 1255a4c075f1SUlrich Hecht #define ELF_CLASS ELFCLASS64 1256a4c075f1SUlrich Hecht #define ELF_DATA ELFDATA2MSB 1257a4c075f1SUlrich Hecht #define ELF_ARCH EM_S390 1258a4c075f1SUlrich Hecht 1259a4c075f1SUlrich Hecht static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1260a4c075f1SUlrich Hecht { 1261a4c075f1SUlrich Hecht regs->psw.addr = infop->entry; 1262a4c075f1SUlrich Hecht regs->psw.mask = PSW_MASK_64 | PSW_MASK_32; 1263a4c075f1SUlrich Hecht regs->gprs[15] = infop->start_stack; 1264a4c075f1SUlrich Hecht } 1265a4c075f1SUlrich Hecht 1266a4c075f1SUlrich Hecht #endif /* TARGET_S390X */ 1267a4c075f1SUlrich Hecht 1268b16189b2SChen Gang #ifdef TARGET_TILEGX 1269b16189b2SChen Gang 1270b16189b2SChen Gang /* 42 bits real used address, a half for user mode */ 1271b16189b2SChen Gang #define ELF_START_MMAP (0x00000020000000000ULL) 1272b16189b2SChen Gang 1273b16189b2SChen Gang #define elf_check_arch(x) ((x) == EM_TILEGX) 1274b16189b2SChen Gang 1275b16189b2SChen Gang #define ELF_CLASS ELFCLASS64 1276b16189b2SChen Gang #define ELF_DATA ELFDATA2LSB 1277b16189b2SChen Gang #define ELF_ARCH EM_TILEGX 1278b16189b2SChen Gang 1279b16189b2SChen Gang static inline void init_thread(struct target_pt_regs *regs, 1280b16189b2SChen Gang struct image_info *infop) 1281b16189b2SChen Gang { 1282b16189b2SChen Gang regs->pc = infop->entry; 1283b16189b2SChen Gang regs->sp = infop->start_stack; 1284b16189b2SChen Gang 1285b16189b2SChen Gang } 1286b16189b2SChen Gang 1287b16189b2SChen Gang #define ELF_EXEC_PAGESIZE 65536 /* TILE-Gx page size is 64KB */ 1288b16189b2SChen Gang 1289b16189b2SChen Gang #endif /* TARGET_TILEGX */ 1290b16189b2SChen Gang 129147ae93cdSMichael Clark #ifdef TARGET_RISCV 129247ae93cdSMichael Clark 129347ae93cdSMichael Clark #define ELF_START_MMAP 0x80000000 129447ae93cdSMichael Clark #define ELF_ARCH EM_RISCV 129547ae93cdSMichael Clark 129647ae93cdSMichael Clark #ifdef TARGET_RISCV32 129747ae93cdSMichael Clark #define ELF_CLASS ELFCLASS32 129847ae93cdSMichael Clark #else 129947ae93cdSMichael Clark #define ELF_CLASS ELFCLASS64 130047ae93cdSMichael Clark #endif 130147ae93cdSMichael Clark 130247ae93cdSMichael Clark static inline void init_thread(struct target_pt_regs *regs, 130347ae93cdSMichael Clark struct image_info *infop) 130447ae93cdSMichael Clark { 130547ae93cdSMichael Clark regs->sepc = infop->entry; 130647ae93cdSMichael Clark regs->sp = infop->start_stack; 130747ae93cdSMichael Clark } 130847ae93cdSMichael Clark 130947ae93cdSMichael Clark #define ELF_EXEC_PAGESIZE 4096 131047ae93cdSMichael Clark 131147ae93cdSMichael Clark #endif /* TARGET_RISCV */ 131247ae93cdSMichael Clark 13137c248bcdSRichard Henderson #ifdef TARGET_HPPA 13147c248bcdSRichard Henderson 13157c248bcdSRichard Henderson #define ELF_START_MMAP 0x80000000 13167c248bcdSRichard Henderson #define ELF_CLASS ELFCLASS32 13177c248bcdSRichard Henderson #define ELF_ARCH EM_PARISC 13187c248bcdSRichard Henderson #define ELF_PLATFORM "PARISC" 13197c248bcdSRichard Henderson #define STACK_GROWS_DOWN 0 13207c248bcdSRichard Henderson #define STACK_ALIGNMENT 64 13217c248bcdSRichard Henderson 13227c248bcdSRichard Henderson static inline void init_thread(struct target_pt_regs *regs, 13237c248bcdSRichard Henderson struct image_info *infop) 13247c248bcdSRichard Henderson { 13257c248bcdSRichard Henderson regs->iaoq[0] = infop->entry; 13267c248bcdSRichard Henderson regs->iaoq[1] = infop->entry + 4; 13277c248bcdSRichard Henderson regs->gr[23] = 0; 13287c248bcdSRichard Henderson regs->gr[24] = infop->arg_start; 13297c248bcdSRichard Henderson regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong); 13307c248bcdSRichard Henderson /* The top-of-stack contains a linkage buffer. */ 13317c248bcdSRichard Henderson regs->gr[30] = infop->start_stack + 64; 13327c248bcdSRichard Henderson regs->gr[31] = infop->entry; 13337c248bcdSRichard Henderson } 13347c248bcdSRichard Henderson 13357c248bcdSRichard Henderson #endif /* TARGET_HPPA */ 13367c248bcdSRichard Henderson 1337ba7651fbSMax Filippov #ifdef TARGET_XTENSA 1338ba7651fbSMax Filippov 1339ba7651fbSMax Filippov #define ELF_START_MMAP 0x20000000 1340ba7651fbSMax Filippov 1341ba7651fbSMax Filippov #define ELF_CLASS ELFCLASS32 1342ba7651fbSMax Filippov #define ELF_ARCH EM_XTENSA 1343ba7651fbSMax Filippov 1344ba7651fbSMax Filippov static inline void init_thread(struct target_pt_regs *regs, 1345ba7651fbSMax Filippov struct image_info *infop) 1346ba7651fbSMax Filippov { 1347ba7651fbSMax Filippov regs->windowbase = 0; 1348ba7651fbSMax Filippov regs->windowstart = 1; 1349ba7651fbSMax Filippov regs->areg[1] = infop->start_stack; 1350ba7651fbSMax Filippov regs->pc = infop->entry; 1351ba7651fbSMax Filippov } 1352ba7651fbSMax Filippov 1353ba7651fbSMax Filippov /* See linux kernel: arch/xtensa/include/asm/elf.h. */ 1354ba7651fbSMax Filippov #define ELF_NREG 128 1355ba7651fbSMax Filippov typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1356ba7651fbSMax Filippov 1357ba7651fbSMax Filippov enum { 1358ba7651fbSMax Filippov TARGET_REG_PC, 1359ba7651fbSMax Filippov TARGET_REG_PS, 1360ba7651fbSMax Filippov TARGET_REG_LBEG, 1361ba7651fbSMax Filippov TARGET_REG_LEND, 1362ba7651fbSMax Filippov TARGET_REG_LCOUNT, 1363ba7651fbSMax Filippov TARGET_REG_SAR, 1364ba7651fbSMax Filippov TARGET_REG_WINDOWSTART, 1365ba7651fbSMax Filippov TARGET_REG_WINDOWBASE, 1366ba7651fbSMax Filippov TARGET_REG_THREADPTR, 1367ba7651fbSMax Filippov TARGET_REG_AR0 = 64, 1368ba7651fbSMax Filippov }; 1369ba7651fbSMax Filippov 1370ba7651fbSMax Filippov static void elf_core_copy_regs(target_elf_gregset_t *regs, 1371ba7651fbSMax Filippov const CPUXtensaState *env) 1372ba7651fbSMax Filippov { 1373ba7651fbSMax Filippov unsigned i; 1374ba7651fbSMax Filippov 1375ba7651fbSMax Filippov (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1376ba7651fbSMax Filippov (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); 1377ba7651fbSMax Filippov (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); 1378ba7651fbSMax Filippov (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); 1379ba7651fbSMax Filippov (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); 1380ba7651fbSMax Filippov (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); 1381ba7651fbSMax Filippov (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); 1382ba7651fbSMax Filippov (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); 1383ba7651fbSMax Filippov (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); 1384ba7651fbSMax Filippov xtensa_sync_phys_from_window((CPUXtensaState *)env); 1385ba7651fbSMax Filippov for (i = 0; i < env->config->nareg; ++i) { 1386ba7651fbSMax Filippov (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); 1387ba7651fbSMax Filippov } 1388ba7651fbSMax Filippov } 1389ba7651fbSMax Filippov 1390ba7651fbSMax Filippov #define USE_ELF_CORE_DUMP 1391ba7651fbSMax Filippov #define ELF_EXEC_PAGESIZE 4096 1392ba7651fbSMax Filippov 1393ba7651fbSMax Filippov #endif /* TARGET_XTENSA */ 1394ba7651fbSMax Filippov 139515338fd7Sbellard #ifndef ELF_PLATFORM 139615338fd7Sbellard #define ELF_PLATFORM (NULL) 139715338fd7Sbellard #endif 139815338fd7Sbellard 139975be901cSPeter Crosthwaite #ifndef ELF_MACHINE 140075be901cSPeter Crosthwaite #define ELF_MACHINE ELF_ARCH 140175be901cSPeter Crosthwaite #endif 140275be901cSPeter Crosthwaite 1403d276a604SPeter Crosthwaite #ifndef elf_check_arch 1404d276a604SPeter Crosthwaite #define elf_check_arch(x) ((x) == ELF_ARCH) 1405d276a604SPeter Crosthwaite #endif 1406d276a604SPeter Crosthwaite 140715338fd7Sbellard #ifndef ELF_HWCAP 140815338fd7Sbellard #define ELF_HWCAP 0 140915338fd7Sbellard #endif 141015338fd7Sbellard 14117c4ee5bcSRichard Henderson #ifndef STACK_GROWS_DOWN 14127c4ee5bcSRichard Henderson #define STACK_GROWS_DOWN 1 14137c4ee5bcSRichard Henderson #endif 14147c4ee5bcSRichard Henderson 14157c4ee5bcSRichard Henderson #ifndef STACK_ALIGNMENT 14167c4ee5bcSRichard Henderson #define STACK_ALIGNMENT 16 14177c4ee5bcSRichard Henderson #endif 14187c4ee5bcSRichard Henderson 1419992f48a0Sblueswir1 #ifdef TARGET_ABI32 1420cb33da57Sblueswir1 #undef ELF_CLASS 1421992f48a0Sblueswir1 #define ELF_CLASS ELFCLASS32 1422cb33da57Sblueswir1 #undef bswaptls 1423cb33da57Sblueswir1 #define bswaptls(ptr) bswap32s(ptr) 1424cb33da57Sblueswir1 #endif 1425cb33da57Sblueswir1 142631e31b8aSbellard #include "elf.h" 142709bfb054Sbellard 142809bfb054Sbellard struct exec 142909bfb054Sbellard { 143009bfb054Sbellard unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 143109bfb054Sbellard unsigned int a_text; /* length of text, in bytes */ 143209bfb054Sbellard unsigned int a_data; /* length of data, in bytes */ 143309bfb054Sbellard unsigned int a_bss; /* length of uninitialized data area, in bytes */ 143409bfb054Sbellard unsigned int a_syms; /* length of symbol table data in file, in bytes */ 143509bfb054Sbellard unsigned int a_entry; /* start address */ 143609bfb054Sbellard unsigned int a_trsize; /* length of relocation info for text, in bytes */ 143709bfb054Sbellard unsigned int a_drsize; /* length of relocation info for data, in bytes */ 143809bfb054Sbellard }; 143909bfb054Sbellard 144009bfb054Sbellard 144109bfb054Sbellard #define N_MAGIC(exec) ((exec).a_info & 0xffff) 144209bfb054Sbellard #define OMAGIC 0407 144309bfb054Sbellard #define NMAGIC 0410 144409bfb054Sbellard #define ZMAGIC 0413 144509bfb054Sbellard #define QMAGIC 0314 144609bfb054Sbellard 144731e31b8aSbellard /* Necessary parameters */ 144894894ff2SShivaprasad G Bhat #define TARGET_ELF_EXEC_PAGESIZE \ 144994894ff2SShivaprasad G Bhat (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \ 145094894ff2SShivaprasad G Bhat TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE)) 145194894ff2SShivaprasad G Bhat #define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE) 145279cb1f1dSYongbok Kim #define TARGET_ELF_PAGESTART(_v) ((_v) & \ 145379cb1f1dSYongbok Kim ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1)) 145454936004Sbellard #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1)) 145531e31b8aSbellard 1456444cd5c3SMarco A L Barbosa #define DLINFO_ITEMS 15 145731e31b8aSbellard 145809bfb054Sbellard static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 145909bfb054Sbellard { 146009bfb054Sbellard memcpy(to, from, n); 146109bfb054Sbellard } 146209bfb054Sbellard 146331e31b8aSbellard #ifdef BSWAP_NEEDED 146492a31b1fSbellard static void bswap_ehdr(struct elfhdr *ehdr) 146531e31b8aSbellard { 146631e31b8aSbellard bswap16s(&ehdr->e_type); /* Object file type */ 146731e31b8aSbellard bswap16s(&ehdr->e_machine); /* Architecture */ 146831e31b8aSbellard bswap32s(&ehdr->e_version); /* Object file version */ 146992a31b1fSbellard bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 147092a31b1fSbellard bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 147192a31b1fSbellard bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 147231e31b8aSbellard bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 147331e31b8aSbellard bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 147431e31b8aSbellard bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 147531e31b8aSbellard bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 147631e31b8aSbellard bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 147731e31b8aSbellard bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 147831e31b8aSbellard bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 147931e31b8aSbellard } 148031e31b8aSbellard 1481991f8f0cSRichard Henderson static void bswap_phdr(struct elf_phdr *phdr, int phnum) 148231e31b8aSbellard { 1483991f8f0cSRichard Henderson int i; 1484991f8f0cSRichard Henderson for (i = 0; i < phnum; ++i, ++phdr) { 148531e31b8aSbellard bswap32s(&phdr->p_type); /* Segment type */ 1486991f8f0cSRichard Henderson bswap32s(&phdr->p_flags); /* Segment flags */ 148792a31b1fSbellard bswaptls(&phdr->p_offset); /* Segment file offset */ 148892a31b1fSbellard bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 148992a31b1fSbellard bswaptls(&phdr->p_paddr); /* Segment physical address */ 149092a31b1fSbellard bswaptls(&phdr->p_filesz); /* Segment size in file */ 149192a31b1fSbellard bswaptls(&phdr->p_memsz); /* Segment size in memory */ 149292a31b1fSbellard bswaptls(&phdr->p_align); /* Segment alignment */ 149331e31b8aSbellard } 1494991f8f0cSRichard Henderson } 1495689f936fSbellard 1496991f8f0cSRichard Henderson static void bswap_shdr(struct elf_shdr *shdr, int shnum) 1497689f936fSbellard { 1498991f8f0cSRichard Henderson int i; 1499991f8f0cSRichard Henderson for (i = 0; i < shnum; ++i, ++shdr) { 1500689f936fSbellard bswap32s(&shdr->sh_name); 1501689f936fSbellard bswap32s(&shdr->sh_type); 150292a31b1fSbellard bswaptls(&shdr->sh_flags); 150392a31b1fSbellard bswaptls(&shdr->sh_addr); 150492a31b1fSbellard bswaptls(&shdr->sh_offset); 150592a31b1fSbellard bswaptls(&shdr->sh_size); 1506689f936fSbellard bswap32s(&shdr->sh_link); 1507689f936fSbellard bswap32s(&shdr->sh_info); 150892a31b1fSbellard bswaptls(&shdr->sh_addralign); 150992a31b1fSbellard bswaptls(&shdr->sh_entsize); 1510689f936fSbellard } 1511991f8f0cSRichard Henderson } 1512689f936fSbellard 15137a3148a9Sj_mayer static void bswap_sym(struct elf_sym *sym) 1514689f936fSbellard { 1515689f936fSbellard bswap32s(&sym->st_name); 15167a3148a9Sj_mayer bswaptls(&sym->st_value); 15177a3148a9Sj_mayer bswaptls(&sym->st_size); 1518689f936fSbellard bswap16s(&sym->st_shndx); 1519689f936fSbellard } 1520*5dd0db52SStefan Markovic 1521*5dd0db52SStefan Markovic #ifdef TARGET_MIPS 1522*5dd0db52SStefan Markovic static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) 1523*5dd0db52SStefan Markovic { 1524*5dd0db52SStefan Markovic bswap16s(&abiflags->version); 1525*5dd0db52SStefan Markovic bswap32s(&abiflags->ases); 1526*5dd0db52SStefan Markovic bswap32s(&abiflags->isa_ext); 1527*5dd0db52SStefan Markovic bswap32s(&abiflags->flags1); 1528*5dd0db52SStefan Markovic bswap32s(&abiflags->flags2); 1529*5dd0db52SStefan Markovic } 1530*5dd0db52SStefan Markovic #endif 1531991f8f0cSRichard Henderson #else 1532991f8f0cSRichard Henderson static inline void bswap_ehdr(struct elfhdr *ehdr) { } 1533991f8f0cSRichard Henderson static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { } 1534991f8f0cSRichard Henderson static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { } 1535991f8f0cSRichard Henderson static inline void bswap_sym(struct elf_sym *sym) { } 1536*5dd0db52SStefan Markovic #ifdef TARGET_MIPS 1537*5dd0db52SStefan Markovic static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { } 1538*5dd0db52SStefan Markovic #endif 153931e31b8aSbellard #endif 154031e31b8aSbellard 1541edf8e2afSMika Westerberg #ifdef USE_ELF_CORE_DUMP 15429349b4f9SAndreas Färber static int elf_core_dump(int, const CPUArchState *); 1543edf8e2afSMika Westerberg #endif /* USE_ELF_CORE_DUMP */ 1544682674b8SRichard Henderson static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias); 1545edf8e2afSMika Westerberg 15469058abddSRichard Henderson /* Verify the portions of EHDR within E_IDENT for the target. 15479058abddSRichard Henderson This can be performed before bswapping the entire header. */ 15489058abddSRichard Henderson static bool elf_check_ident(struct elfhdr *ehdr) 15499058abddSRichard Henderson { 15509058abddSRichard Henderson return (ehdr->e_ident[EI_MAG0] == ELFMAG0 15519058abddSRichard Henderson && ehdr->e_ident[EI_MAG1] == ELFMAG1 15529058abddSRichard Henderson && ehdr->e_ident[EI_MAG2] == ELFMAG2 15539058abddSRichard Henderson && ehdr->e_ident[EI_MAG3] == ELFMAG3 15549058abddSRichard Henderson && ehdr->e_ident[EI_CLASS] == ELF_CLASS 15559058abddSRichard Henderson && ehdr->e_ident[EI_DATA] == ELF_DATA 15569058abddSRichard Henderson && ehdr->e_ident[EI_VERSION] == EV_CURRENT); 15579058abddSRichard Henderson } 15589058abddSRichard Henderson 15599058abddSRichard Henderson /* Verify the portions of EHDR outside of E_IDENT for the target. 15609058abddSRichard Henderson This has to wait until after bswapping the header. */ 15619058abddSRichard Henderson static bool elf_check_ehdr(struct elfhdr *ehdr) 15629058abddSRichard Henderson { 15639058abddSRichard Henderson return (elf_check_arch(ehdr->e_machine) 15649058abddSRichard Henderson && ehdr->e_ehsize == sizeof(struct elfhdr) 15659058abddSRichard Henderson && ehdr->e_phentsize == sizeof(struct elf_phdr) 15669058abddSRichard Henderson && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN)); 15679058abddSRichard Henderson } 15689058abddSRichard Henderson 156931e31b8aSbellard /* 1570e5fe0c52Spbrook * 'copy_elf_strings()' copies argument/envelope strings from user 157131e31b8aSbellard * memory to free pages in kernel mem. These are in a format ready 157231e31b8aSbellard * to be put directly into the top of new user memory. 157331e31b8aSbellard * 157431e31b8aSbellard */ 157559baae9aSStefan Brüns static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch, 157659baae9aSStefan Brüns abi_ulong p, abi_ulong stack_limit) 157731e31b8aSbellard { 157859baae9aSStefan Brüns char *tmp; 15797c4ee5bcSRichard Henderson int len, i; 158059baae9aSStefan Brüns abi_ulong top = p; 158131e31b8aSbellard 158231e31b8aSbellard if (!p) { 158331e31b8aSbellard return 0; /* bullet-proofing */ 158431e31b8aSbellard } 158559baae9aSStefan Brüns 15867c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 15877c4ee5bcSRichard Henderson int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1; 15887c4ee5bcSRichard Henderson for (i = argc - 1; i >= 0; --i) { 15897c4ee5bcSRichard Henderson tmp = argv[i]; 1590edf779ffSbellard if (!tmp) { 159131e31b8aSbellard fprintf(stderr, "VFS: argc is wrong"); 159231e31b8aSbellard exit(-1); 159331e31b8aSbellard } 159459baae9aSStefan Brüns len = strlen(tmp) + 1; 159559baae9aSStefan Brüns tmp += len; 159659baae9aSStefan Brüns 159759baae9aSStefan Brüns if (len > (p - stack_limit)) { 159831e31b8aSbellard return 0; 159931e31b8aSbellard } 160031e31b8aSbellard while (len) { 160131e31b8aSbellard int bytes_to_copy = (len > offset) ? offset : len; 160231e31b8aSbellard tmp -= bytes_to_copy; 160331e31b8aSbellard p -= bytes_to_copy; 160431e31b8aSbellard offset -= bytes_to_copy; 160531e31b8aSbellard len -= bytes_to_copy; 160659baae9aSStefan Brüns 160759baae9aSStefan Brüns memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 160859baae9aSStefan Brüns 160959baae9aSStefan Brüns if (offset == 0) { 161059baae9aSStefan Brüns memcpy_to_target(p, scratch, top - p); 161159baae9aSStefan Brüns top = p; 161259baae9aSStefan Brüns offset = TARGET_PAGE_SIZE; 161331e31b8aSbellard } 161431e31b8aSbellard } 161531e31b8aSbellard } 16167c4ee5bcSRichard Henderson if (p != top) { 161759baae9aSStefan Brüns memcpy_to_target(p, scratch + offset, top - p); 161859baae9aSStefan Brüns } 16197c4ee5bcSRichard Henderson } else { 16207c4ee5bcSRichard Henderson int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 16217c4ee5bcSRichard Henderson for (i = 0; i < argc; ++i) { 16227c4ee5bcSRichard Henderson tmp = argv[i]; 16237c4ee5bcSRichard Henderson if (!tmp) { 16247c4ee5bcSRichard Henderson fprintf(stderr, "VFS: argc is wrong"); 16257c4ee5bcSRichard Henderson exit(-1); 16267c4ee5bcSRichard Henderson } 16277c4ee5bcSRichard Henderson len = strlen(tmp) + 1; 16287c4ee5bcSRichard Henderson if (len > (stack_limit - p)) { 16297c4ee5bcSRichard Henderson return 0; 16307c4ee5bcSRichard Henderson } 16317c4ee5bcSRichard Henderson while (len) { 16327c4ee5bcSRichard Henderson int bytes_to_copy = (len > remaining) ? remaining : len; 16337c4ee5bcSRichard Henderson 16347c4ee5bcSRichard Henderson memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 16357c4ee5bcSRichard Henderson 16367c4ee5bcSRichard Henderson tmp += bytes_to_copy; 16377c4ee5bcSRichard Henderson remaining -= bytes_to_copy; 16387c4ee5bcSRichard Henderson p += bytes_to_copy; 16397c4ee5bcSRichard Henderson len -= bytes_to_copy; 16407c4ee5bcSRichard Henderson 16417c4ee5bcSRichard Henderson if (remaining == 0) { 16427c4ee5bcSRichard Henderson memcpy_to_target(top, scratch, p - top); 16437c4ee5bcSRichard Henderson top = p; 16447c4ee5bcSRichard Henderson remaining = TARGET_PAGE_SIZE; 16457c4ee5bcSRichard Henderson } 16467c4ee5bcSRichard Henderson } 16477c4ee5bcSRichard Henderson } 16487c4ee5bcSRichard Henderson if (p != top) { 16497c4ee5bcSRichard Henderson memcpy_to_target(top, scratch, p - top); 16507c4ee5bcSRichard Henderson } 16517c4ee5bcSRichard Henderson } 165259baae9aSStefan Brüns 165331e31b8aSbellard return p; 165431e31b8aSbellard } 165531e31b8aSbellard 165659baae9aSStefan Brüns /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 165759baae9aSStefan Brüns * argument/environment space. Newer kernels (>2.6.33) allow more, 165859baae9aSStefan Brüns * dependent on stack size, but guarantee at least 32 pages for 165959baae9aSStefan Brüns * backwards compatibility. 166059baae9aSStefan Brüns */ 166159baae9aSStefan Brüns #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 166259baae9aSStefan Brüns 166359baae9aSStefan Brüns static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 166431e31b8aSbellard struct image_info *info) 166531e31b8aSbellard { 166659baae9aSStefan Brüns abi_ulong size, error, guard; 166731e31b8aSbellard 1668703e0e89SRichard Henderson size = guest_stack_size; 166959baae9aSStefan Brüns if (size < STACK_LOWER_LIMIT) { 167059baae9aSStefan Brüns size = STACK_LOWER_LIMIT; 167160dcbcb5SRichard Henderson } 167260dcbcb5SRichard Henderson guard = TARGET_PAGE_SIZE; 167360dcbcb5SRichard Henderson if (guard < qemu_real_host_page_size) { 167460dcbcb5SRichard Henderson guard = qemu_real_host_page_size; 167560dcbcb5SRichard Henderson } 167660dcbcb5SRichard Henderson 167760dcbcb5SRichard Henderson error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE, 167860dcbcb5SRichard Henderson MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 167909bfb054Sbellard if (error == -1) { 168060dcbcb5SRichard Henderson perror("mmap stack"); 168131e31b8aSbellard exit(-1); 168231e31b8aSbellard } 168331e31b8aSbellard 168460dcbcb5SRichard Henderson /* We reserve one extra page at the top of the stack as guard. */ 16857c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 168660dcbcb5SRichard Henderson target_mprotect(error, guard, PROT_NONE); 168760dcbcb5SRichard Henderson info->stack_limit = error + guard; 168859baae9aSStefan Brüns return info->stack_limit + size - sizeof(void *); 16897c4ee5bcSRichard Henderson } else { 16907c4ee5bcSRichard Henderson target_mprotect(error + size, guard, PROT_NONE); 16917c4ee5bcSRichard Henderson info->stack_limit = error + size; 16927c4ee5bcSRichard Henderson return error; 16937c4ee5bcSRichard Henderson } 169431e31b8aSbellard } 169531e31b8aSbellard 1696cf129f3aSRichard Henderson /* Map and zero the bss. We need to explicitly zero any fractional pages 1697cf129f3aSRichard Henderson after the data section (i.e. bss). */ 1698cf129f3aSRichard Henderson static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) 169931e31b8aSbellard { 1700cf129f3aSRichard Henderson uintptr_t host_start, host_map_start, host_end; 1701cf129f3aSRichard Henderson 1702cf129f3aSRichard Henderson last_bss = TARGET_PAGE_ALIGN(last_bss); 1703cf129f3aSRichard Henderson 1704cf129f3aSRichard Henderson /* ??? There is confusion between qemu_real_host_page_size and 1705cf129f3aSRichard Henderson qemu_host_page_size here and elsewhere in target_mmap, which 1706cf129f3aSRichard Henderson may lead to the end of the data section mapping from the file 1707cf129f3aSRichard Henderson not being mapped. At least there was an explicit test and 1708cf129f3aSRichard Henderson comment for that here, suggesting that "the file size must 1709cf129f3aSRichard Henderson be known". The comment probably pre-dates the introduction 1710cf129f3aSRichard Henderson of the fstat system call in target_mmap which does in fact 1711cf129f3aSRichard Henderson find out the size. What isn't clear is if the workaround 1712cf129f3aSRichard Henderson here is still actually needed. For now, continue with it, 1713cf129f3aSRichard Henderson but merge it with the "normal" mmap that would allocate the bss. */ 1714cf129f3aSRichard Henderson 1715cf129f3aSRichard Henderson host_start = (uintptr_t) g2h(elf_bss); 1716cf129f3aSRichard Henderson host_end = (uintptr_t) g2h(last_bss); 17170c2d70c4SPaolo Bonzini host_map_start = REAL_HOST_PAGE_ALIGN(host_start); 1718cf129f3aSRichard Henderson 1719cf129f3aSRichard Henderson if (host_map_start < host_end) { 1720cf129f3aSRichard Henderson void *p = mmap((void *)host_map_start, host_end - host_map_start, 1721cf129f3aSRichard Henderson prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 1722cf129f3aSRichard Henderson if (p == MAP_FAILED) { 172331e31b8aSbellard perror("cannot mmap brk"); 172431e31b8aSbellard exit(-1); 172531e31b8aSbellard } 1726f46e9a0bSTom Musta } 1727cf129f3aSRichard Henderson 1728f46e9a0bSTom Musta /* Ensure that the bss page(s) are valid */ 1729f46e9a0bSTom Musta if ((page_get_flags(last_bss-1) & prot) != prot) { 1730cf129f3aSRichard Henderson page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID); 173131e31b8aSbellard } 173231e31b8aSbellard 1733cf129f3aSRichard Henderson if (host_start < host_map_start) { 1734cf129f3aSRichard Henderson memset((void *)host_start, 0, host_map_start - host_start); 1735853d6f7aSbellard } 1736853d6f7aSbellard } 1737853d6f7aSbellard 1738cf58affeSChristophe Lyon #ifdef TARGET_ARM 1739cf58affeSChristophe Lyon static int elf_is_fdpic(struct elfhdr *exec) 1740cf58affeSChristophe Lyon { 1741cf58affeSChristophe Lyon return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC; 1742cf58affeSChristophe Lyon } 1743cf58affeSChristophe Lyon #else 1744a99856cdSChristophe Lyon /* Default implementation, always false. */ 1745a99856cdSChristophe Lyon static int elf_is_fdpic(struct elfhdr *exec) 1746a99856cdSChristophe Lyon { 1747a99856cdSChristophe Lyon return 0; 1748a99856cdSChristophe Lyon } 1749cf58affeSChristophe Lyon #endif 1750a99856cdSChristophe Lyon 17511af02e83SMike Frysinger static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp) 17521af02e83SMike Frysinger { 17531af02e83SMike Frysinger uint16_t n; 17541af02e83SMike Frysinger struct elf32_fdpic_loadseg *loadsegs = info->loadsegs; 17551af02e83SMike Frysinger 17561af02e83SMike Frysinger /* elf32_fdpic_loadseg */ 17571af02e83SMike Frysinger n = info->nsegs; 17581af02e83SMike Frysinger while (n--) { 17591af02e83SMike Frysinger sp -= 12; 17601af02e83SMike Frysinger put_user_u32(loadsegs[n].addr, sp+0); 17611af02e83SMike Frysinger put_user_u32(loadsegs[n].p_vaddr, sp+4); 17621af02e83SMike Frysinger put_user_u32(loadsegs[n].p_memsz, sp+8); 17631af02e83SMike Frysinger } 17641af02e83SMike Frysinger 17651af02e83SMike Frysinger /* elf32_fdpic_loadmap */ 17661af02e83SMike Frysinger sp -= 4; 17671af02e83SMike Frysinger put_user_u16(0, sp+0); /* version */ 17681af02e83SMike Frysinger put_user_u16(info->nsegs, sp+2); /* nsegs */ 17691af02e83SMike Frysinger 17701af02e83SMike Frysinger info->personality = PER_LINUX_FDPIC; 17711af02e83SMike Frysinger info->loadmap_addr = sp; 17721af02e83SMike Frysinger 17731af02e83SMike Frysinger return sp; 17741af02e83SMike Frysinger } 17751af02e83SMike Frysinger 1776992f48a0Sblueswir1 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, 177731e31b8aSbellard struct elfhdr *exec, 17788e62a717SRichard Henderson struct image_info *info, 17798e62a717SRichard Henderson struct image_info *interp_info) 178031e31b8aSbellard { 1781992f48a0Sblueswir1 abi_ulong sp; 17827c4ee5bcSRichard Henderson abi_ulong u_argc, u_argv, u_envp, u_auxv; 178353a5960aSpbrook int size; 178414322badSLaurent ALFONSI int i; 178514322badSLaurent ALFONSI abi_ulong u_rand_bytes; 178614322badSLaurent ALFONSI uint8_t k_rand_bytes[16]; 1787992f48a0Sblueswir1 abi_ulong u_platform; 178815338fd7Sbellard const char *k_platform; 1789863cf0b7Sj_mayer const int n = sizeof(elf_addr_t); 179031e31b8aSbellard 179153a5960aSpbrook sp = p; 17921af02e83SMike Frysinger 17931af02e83SMike Frysinger /* Needs to be before we load the env/argc/... */ 17941af02e83SMike Frysinger if (elf_is_fdpic(exec)) { 17951af02e83SMike Frysinger /* Need 4 byte alignment for these structs */ 17961af02e83SMike Frysinger sp &= ~3; 17971af02e83SMike Frysinger sp = loader_build_fdpic_loadmap(info, sp); 17981af02e83SMike Frysinger info->other_info = interp_info; 17991af02e83SMike Frysinger if (interp_info) { 18001af02e83SMike Frysinger interp_info->other_info = info; 18011af02e83SMike Frysinger sp = loader_build_fdpic_loadmap(interp_info, sp); 18023cb10cfaSChristophe Lyon info->interpreter_loadmap_addr = interp_info->loadmap_addr; 18033cb10cfaSChristophe Lyon info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 18043cb10cfaSChristophe Lyon } else { 18053cb10cfaSChristophe Lyon info->interpreter_loadmap_addr = 0; 18063cb10cfaSChristophe Lyon info->interpreter_pt_dynamic_addr = 0; 18071af02e83SMike Frysinger } 18081af02e83SMike Frysinger } 18091af02e83SMike Frysinger 181053a5960aSpbrook u_platform = 0; 181115338fd7Sbellard k_platform = ELF_PLATFORM; 181215338fd7Sbellard if (k_platform) { 181315338fd7Sbellard size_t len = strlen(k_platform) + 1; 18147c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 181553a5960aSpbrook sp -= (len + n - 1) & ~(n - 1); 181653a5960aSpbrook u_platform = sp; 1817579a97f7Sbellard /* FIXME - check return value of memcpy_to_target() for failure */ 181853a5960aSpbrook memcpy_to_target(sp, k_platform, len); 18197c4ee5bcSRichard Henderson } else { 18207c4ee5bcSRichard Henderson memcpy_to_target(sp, k_platform, len); 18217c4ee5bcSRichard Henderson u_platform = sp; 18227c4ee5bcSRichard Henderson sp += len + 1; 18237c4ee5bcSRichard Henderson } 18247c4ee5bcSRichard Henderson } 18257c4ee5bcSRichard Henderson 18267c4ee5bcSRichard Henderson /* Provide 16 byte alignment for the PRNG, and basic alignment for 18277c4ee5bcSRichard Henderson * the argv and envp pointers. 18287c4ee5bcSRichard Henderson */ 18297c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 18307c4ee5bcSRichard Henderson sp = QEMU_ALIGN_DOWN(sp, 16); 18317c4ee5bcSRichard Henderson } else { 18327c4ee5bcSRichard Henderson sp = QEMU_ALIGN_UP(sp, 16); 183315338fd7Sbellard } 183414322badSLaurent ALFONSI 183514322badSLaurent ALFONSI /* 183614322badSLaurent ALFONSI * Generate 16 random bytes for userspace PRNG seeding (not 183714322badSLaurent ALFONSI * cryptically secure but it's not the aim of QEMU). 183814322badSLaurent ALFONSI */ 183914322badSLaurent ALFONSI for (i = 0; i < 16; i++) { 184014322badSLaurent ALFONSI k_rand_bytes[i] = rand(); 184114322badSLaurent ALFONSI } 18427c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 184314322badSLaurent ALFONSI sp -= 16; 184414322badSLaurent ALFONSI u_rand_bytes = sp; 184514322badSLaurent ALFONSI /* FIXME - check return value of memcpy_to_target() for failure */ 184614322badSLaurent ALFONSI memcpy_to_target(sp, k_rand_bytes, 16); 18477c4ee5bcSRichard Henderson } else { 18487c4ee5bcSRichard Henderson memcpy_to_target(sp, k_rand_bytes, 16); 18497c4ee5bcSRichard Henderson u_rand_bytes = sp; 18507c4ee5bcSRichard Henderson sp += 16; 18517c4ee5bcSRichard Henderson } 185214322badSLaurent ALFONSI 185353a5960aSpbrook size = (DLINFO_ITEMS + 1) * 2; 185415338fd7Sbellard if (k_platform) 185553a5960aSpbrook size += 2; 1856f5155289Sbellard #ifdef DLINFO_ARCH_ITEMS 185753a5960aSpbrook size += DLINFO_ARCH_ITEMS * 2; 1858f5155289Sbellard #endif 1859ad6919dcSPeter Maydell #ifdef ELF_HWCAP2 1860ad6919dcSPeter Maydell size += 2; 1861ad6919dcSPeter Maydell #endif 1862f516511eSPeter Maydell info->auxv_len = size * n; 1863f516511eSPeter Maydell 186453a5960aSpbrook size += envc + argc + 2; 1865b9329d4bSRichard Henderson size += 1; /* argc itself */ 186653a5960aSpbrook size *= n; 18677c4ee5bcSRichard Henderson 18687c4ee5bcSRichard Henderson /* Allocate space and finalize stack alignment for entry now. */ 18697c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 18707c4ee5bcSRichard Henderson u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 18717c4ee5bcSRichard Henderson sp = u_argc; 18727c4ee5bcSRichard Henderson } else { 18737c4ee5bcSRichard Henderson u_argc = sp; 18747c4ee5bcSRichard Henderson sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 18757c4ee5bcSRichard Henderson } 18767c4ee5bcSRichard Henderson 18777c4ee5bcSRichard Henderson u_argv = u_argc + n; 18787c4ee5bcSRichard Henderson u_envp = u_argv + (argc + 1) * n; 18797c4ee5bcSRichard Henderson u_auxv = u_envp + (envc + 1) * n; 18807c4ee5bcSRichard Henderson info->saved_auxv = u_auxv; 18817c4ee5bcSRichard Henderson info->arg_start = u_argv; 18827c4ee5bcSRichard Henderson info->arg_end = u_argv + argc * n; 1883f5155289Sbellard 1884863cf0b7Sj_mayer /* This is correct because Linux defines 1885863cf0b7Sj_mayer * elf_addr_t as Elf32_Off / Elf64_Off 1886863cf0b7Sj_mayer */ 188753a5960aSpbrook #define NEW_AUX_ENT(id, val) do { \ 18887c4ee5bcSRichard Henderson put_user_ual(id, u_auxv); u_auxv += n; \ 18897c4ee5bcSRichard Henderson put_user_ual(val, u_auxv); u_auxv += n; \ 189053a5960aSpbrook } while(0) 18912f619698Sbellard 189282991bedSPeter Maydell #ifdef ARCH_DLINFO 189382991bedSPeter Maydell /* 189482991bedSPeter Maydell * ARCH_DLINFO must come first so platform specific code can enforce 189582991bedSPeter Maydell * special alignment requirements on the AUXV if necessary (eg. PPC). 189682991bedSPeter Maydell */ 189782991bedSPeter Maydell ARCH_DLINFO; 189882991bedSPeter Maydell #endif 1899f516511eSPeter Maydell /* There must be exactly DLINFO_ITEMS entries here, or the assert 1900f516511eSPeter Maydell * on info->auxv_len will trigger. 1901f516511eSPeter Maydell */ 19028e62a717SRichard Henderson NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff)); 1903992f48a0Sblueswir1 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); 1904992f48a0Sblueswir1 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); 190533143c44SLaurent Vivier if ((info->alignment & ~qemu_host_page_mask) != 0) { 190633143c44SLaurent Vivier /* Target doesn't support host page size alignment */ 190733143c44SLaurent Vivier NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); 190833143c44SLaurent Vivier } else { 190933143c44SLaurent Vivier NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, 191033143c44SLaurent Vivier qemu_host_page_size))); 191133143c44SLaurent Vivier } 19128e62a717SRichard Henderson NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); 1913992f48a0Sblueswir1 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); 19148e62a717SRichard Henderson NEW_AUX_ENT(AT_ENTRY, info->entry); 1915992f48a0Sblueswir1 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid()); 1916992f48a0Sblueswir1 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); 1917992f48a0Sblueswir1 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); 1918992f48a0Sblueswir1 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); 1919992f48a0Sblueswir1 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); 1920a07c67dfSpbrook NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); 192114322badSLaurent ALFONSI NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes); 1922444cd5c3SMarco A L Barbosa NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE)); 192314322badSLaurent ALFONSI 1924ad6919dcSPeter Maydell #ifdef ELF_HWCAP2 1925ad6919dcSPeter Maydell NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2); 1926ad6919dcSPeter Maydell #endif 1927ad6919dcSPeter Maydell 19287c4ee5bcSRichard Henderson if (u_platform) { 192953a5960aSpbrook NEW_AUX_ENT(AT_PLATFORM, u_platform); 19307c4ee5bcSRichard Henderson } 19317c4ee5bcSRichard Henderson NEW_AUX_ENT (AT_NULL, 0); 1932f5155289Sbellard #undef NEW_AUX_ENT 1933f5155289Sbellard 1934f516511eSPeter Maydell /* Check that our initial calculation of the auxv length matches how much 1935f516511eSPeter Maydell * we actually put into it. 1936f516511eSPeter Maydell */ 1937f516511eSPeter Maydell assert(info->auxv_len == u_auxv - info->saved_auxv); 1938edf8e2afSMika Westerberg 19397c4ee5bcSRichard Henderson put_user_ual(argc, u_argc); 19407c4ee5bcSRichard Henderson 19417c4ee5bcSRichard Henderson p = info->arg_strings; 19427c4ee5bcSRichard Henderson for (i = 0; i < argc; ++i) { 19437c4ee5bcSRichard Henderson put_user_ual(p, u_argv); 19447c4ee5bcSRichard Henderson u_argv += n; 19457c4ee5bcSRichard Henderson p += target_strlen(p) + 1; 19467c4ee5bcSRichard Henderson } 19477c4ee5bcSRichard Henderson put_user_ual(0, u_argv); 19487c4ee5bcSRichard Henderson 19497c4ee5bcSRichard Henderson p = info->env_strings; 19507c4ee5bcSRichard Henderson for (i = 0; i < envc; ++i) { 19517c4ee5bcSRichard Henderson put_user_ual(p, u_envp); 19527c4ee5bcSRichard Henderson u_envp += n; 19537c4ee5bcSRichard Henderson p += target_strlen(p) + 1; 19547c4ee5bcSRichard Henderson } 19557c4ee5bcSRichard Henderson put_user_ual(0, u_envp); 19567c4ee5bcSRichard Henderson 195731e31b8aSbellard return sp; 195831e31b8aSbellard } 195931e31b8aSbellard 1960dce10401SMeador Inge unsigned long init_guest_space(unsigned long host_start, 1961dce10401SMeador Inge unsigned long host_size, 1962dce10401SMeador Inge unsigned long guest_start, 1963dce10401SMeador Inge bool fixed) 1964dce10401SMeador Inge { 1965293f2060SLuke Shumaker unsigned long current_start, aligned_start; 1966dce10401SMeador Inge int flags; 1967dce10401SMeador Inge 1968dce10401SMeador Inge assert(host_start || host_size); 1969dce10401SMeador Inge 1970dce10401SMeador Inge /* If just a starting address is given, then just verify that 1971dce10401SMeador Inge * address. */ 1972dce10401SMeador Inge if (host_start && !host_size) { 19738756e136SLuke Shumaker #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) 1974c3637eafSLuke Shumaker if (init_guest_commpage(host_start, host_size) != 1) { 1975dce10401SMeador Inge return (unsigned long)-1; 1976dce10401SMeador Inge } 19778756e136SLuke Shumaker #endif 19788756e136SLuke Shumaker return host_start; 1979dce10401SMeador Inge } 1980dce10401SMeador Inge 1981dce10401SMeador Inge /* Setup the initial flags and start address. */ 1982dce10401SMeador Inge current_start = host_start & qemu_host_page_mask; 1983dce10401SMeador Inge flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE; 1984dce10401SMeador Inge if (fixed) { 1985dce10401SMeador Inge flags |= MAP_FIXED; 1986dce10401SMeador Inge } 1987dce10401SMeador Inge 1988dce10401SMeador Inge /* Otherwise, a non-zero size region of memory needs to be mapped 1989dce10401SMeador Inge * and validated. */ 19902a53535aSLuke Shumaker 19912a53535aSLuke Shumaker #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) 19922a53535aSLuke Shumaker /* On 32-bit ARM, we need to map not just the usable memory, but 19932a53535aSLuke Shumaker * also the commpage. Try to find a suitable place by allocating 19942a53535aSLuke Shumaker * a big chunk for all of it. If host_start, then the naive 19952a53535aSLuke Shumaker * strategy probably does good enough. 19962a53535aSLuke Shumaker */ 19972a53535aSLuke Shumaker if (!host_start) { 19982a53535aSLuke Shumaker unsigned long guest_full_size, host_full_size, real_start; 19992a53535aSLuke Shumaker 20002a53535aSLuke Shumaker guest_full_size = 20012a53535aSLuke Shumaker (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size; 20022a53535aSLuke Shumaker host_full_size = guest_full_size - guest_start; 20032a53535aSLuke Shumaker real_start = (unsigned long) 20042a53535aSLuke Shumaker mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0); 20052a53535aSLuke Shumaker if (real_start == (unsigned long)-1) { 20062a53535aSLuke Shumaker if (host_size < host_full_size - qemu_host_page_size) { 20072a53535aSLuke Shumaker /* We failed to map a continous segment, but we're 20082a53535aSLuke Shumaker * allowed to have a gap between the usable memory and 20092a53535aSLuke Shumaker * the commpage where other things can be mapped. 20102a53535aSLuke Shumaker * This sparseness gives us more flexibility to find 20112a53535aSLuke Shumaker * an address range. 20122a53535aSLuke Shumaker */ 20132a53535aSLuke Shumaker goto naive; 20142a53535aSLuke Shumaker } 20152a53535aSLuke Shumaker return (unsigned long)-1; 20162a53535aSLuke Shumaker } 20172a53535aSLuke Shumaker munmap((void *)real_start, host_full_size); 20182a53535aSLuke Shumaker if (real_start & ~qemu_host_page_mask) { 20192a53535aSLuke Shumaker /* The same thing again, but with an extra qemu_host_page_size 20202a53535aSLuke Shumaker * so that we can shift around alignment. 20212a53535aSLuke Shumaker */ 20222a53535aSLuke Shumaker unsigned long real_size = host_full_size + qemu_host_page_size; 20232a53535aSLuke Shumaker real_start = (unsigned long) 20242a53535aSLuke Shumaker mmap(NULL, real_size, PROT_NONE, flags, -1, 0); 20252a53535aSLuke Shumaker if (real_start == (unsigned long)-1) { 20262a53535aSLuke Shumaker if (host_size < host_full_size - qemu_host_page_size) { 20272a53535aSLuke Shumaker goto naive; 20282a53535aSLuke Shumaker } 20292a53535aSLuke Shumaker return (unsigned long)-1; 20302a53535aSLuke Shumaker } 20312a53535aSLuke Shumaker munmap((void *)real_start, real_size); 20322a53535aSLuke Shumaker real_start = HOST_PAGE_ALIGN(real_start); 20332a53535aSLuke Shumaker } 20342a53535aSLuke Shumaker current_start = real_start; 20352a53535aSLuke Shumaker } 20362a53535aSLuke Shumaker naive: 20372a53535aSLuke Shumaker #endif 20382a53535aSLuke Shumaker 2039dce10401SMeador Inge while (1) { 2040293f2060SLuke Shumaker unsigned long real_start, real_size, aligned_size; 2041293f2060SLuke Shumaker aligned_size = real_size = host_size; 2042806d1021SMeador Inge 2043dce10401SMeador Inge /* Do not use mmap_find_vma here because that is limited to the 2044dce10401SMeador Inge * guest address space. We are going to make the 2045dce10401SMeador Inge * guest address space fit whatever we're given. 2046dce10401SMeador Inge */ 2047dce10401SMeador Inge real_start = (unsigned long) 2048dce10401SMeador Inge mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0); 2049dce10401SMeador Inge if (real_start == (unsigned long)-1) { 2050dce10401SMeador Inge return (unsigned long)-1; 2051dce10401SMeador Inge } 2052dce10401SMeador Inge 2053aac362e4SLuke Shumaker /* Check to see if the address is valid. */ 2054aac362e4SLuke Shumaker if (host_start && real_start != current_start) { 2055aac362e4SLuke Shumaker goto try_again; 2056aac362e4SLuke Shumaker } 2057aac362e4SLuke Shumaker 2058806d1021SMeador Inge /* Ensure the address is properly aligned. */ 2059806d1021SMeador Inge if (real_start & ~qemu_host_page_mask) { 2060293f2060SLuke Shumaker /* Ideally, we adjust like 2061293f2060SLuke Shumaker * 2062293f2060SLuke Shumaker * pages: [ ][ ][ ][ ][ ] 2063293f2060SLuke Shumaker * old: [ real ] 2064293f2060SLuke Shumaker * [ aligned ] 2065293f2060SLuke Shumaker * new: [ real ] 2066293f2060SLuke Shumaker * [ aligned ] 2067293f2060SLuke Shumaker * 2068293f2060SLuke Shumaker * But if there is something else mapped right after it, 2069293f2060SLuke Shumaker * then obviously it won't have room to grow, and the 2070293f2060SLuke Shumaker * kernel will put the new larger real someplace else with 2071293f2060SLuke Shumaker * unknown alignment (if we made it to here, then 2072293f2060SLuke Shumaker * fixed=false). Which is why we grow real by a full page 2073293f2060SLuke Shumaker * size, instead of by part of one; so that even if we get 2074293f2060SLuke Shumaker * moved, we can still guarantee alignment. But this does 2075293f2060SLuke Shumaker * mean that there is a padding of < 1 page both before 2076293f2060SLuke Shumaker * and after the aligned range; the "after" could could 2077293f2060SLuke Shumaker * cause problems for ARM emulation where it could butt in 2078293f2060SLuke Shumaker * to where we need to put the commpage. 2079293f2060SLuke Shumaker */ 2080806d1021SMeador Inge munmap((void *)real_start, host_size); 2081293f2060SLuke Shumaker real_size = aligned_size + qemu_host_page_size; 2082806d1021SMeador Inge real_start = (unsigned long) 2083806d1021SMeador Inge mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0); 2084806d1021SMeador Inge if (real_start == (unsigned long)-1) { 2085806d1021SMeador Inge return (unsigned long)-1; 2086806d1021SMeador Inge } 2087293f2060SLuke Shumaker aligned_start = HOST_PAGE_ALIGN(real_start); 2088293f2060SLuke Shumaker } else { 2089293f2060SLuke Shumaker aligned_start = real_start; 2090806d1021SMeador Inge } 2091806d1021SMeador Inge 20928756e136SLuke Shumaker #if defined(TARGET_ARM) && !defined(TARGET_AARCH64) 20938756e136SLuke Shumaker /* On 32-bit ARM, we need to also be able to map the commpage. */ 2094293f2060SLuke Shumaker int valid = init_guest_commpage(aligned_start - guest_start, 2095293f2060SLuke Shumaker aligned_size + guest_start); 20967ad75eeaSLuke Shumaker if (valid == -1) { 2097293f2060SLuke Shumaker munmap((void *)real_start, real_size); 2098806d1021SMeador Inge return (unsigned long)-1; 20997ad75eeaSLuke Shumaker } else if (valid == 0) { 21007ad75eeaSLuke Shumaker goto try_again; 2101806d1021SMeador Inge } 21028756e136SLuke Shumaker #endif 2103dce10401SMeador Inge 21047ad75eeaSLuke Shumaker /* If nothing has said `return -1` or `goto try_again` yet, 21057ad75eeaSLuke Shumaker * then the address we have is good. 21067ad75eeaSLuke Shumaker */ 21077ad75eeaSLuke Shumaker break; 21087ad75eeaSLuke Shumaker 21097ad75eeaSLuke Shumaker try_again: 2110dce10401SMeador Inge /* That address didn't work. Unmap and try a different one. 2111dce10401SMeador Inge * The address the host picked because is typically right at 2112dce10401SMeador Inge * the top of the host address space and leaves the guest with 2113dce10401SMeador Inge * no usable address space. Resort to a linear search. We 2114dce10401SMeador Inge * already compensated for mmap_min_addr, so this should not 2115dce10401SMeador Inge * happen often. Probably means we got unlucky and host 2116dce10401SMeador Inge * address space randomization put a shared library somewhere 2117dce10401SMeador Inge * inconvenient. 21188c17d862SLuke Shumaker * 21198c17d862SLuke Shumaker * This is probably a good strategy if host_start, but is 21208c17d862SLuke Shumaker * probably a bad strategy if not, which means we got here 21218c17d862SLuke Shumaker * because of trouble with ARM commpage setup. 2122dce10401SMeador Inge */ 2123293f2060SLuke Shumaker munmap((void *)real_start, real_size); 2124dce10401SMeador Inge current_start += qemu_host_page_size; 2125dce10401SMeador Inge if (host_start == current_start) { 2126dce10401SMeador Inge /* Theoretically possible if host doesn't have any suitably 2127dce10401SMeador Inge * aligned areas. Normally the first mmap will fail. 2128dce10401SMeador Inge */ 2129dce10401SMeador Inge return (unsigned long)-1; 2130dce10401SMeador Inge } 2131dce10401SMeador Inge } 2132dce10401SMeador Inge 213313829020SPaolo Bonzini qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size); 2134806d1021SMeador Inge 2135293f2060SLuke Shumaker return aligned_start; 2136dce10401SMeador Inge } 2137dce10401SMeador Inge 2138f3ed1f5dSPeter Maydell static void probe_guest_base(const char *image_name, 2139f3ed1f5dSPeter Maydell abi_ulong loaddr, abi_ulong hiaddr) 2140f3ed1f5dSPeter Maydell { 2141f3ed1f5dSPeter Maydell /* Probe for a suitable guest base address, if the user has not set 2142f3ed1f5dSPeter Maydell * it explicitly, and set guest_base appropriately. 2143f3ed1f5dSPeter Maydell * In case of error we will print a suitable message and exit. 2144f3ed1f5dSPeter Maydell */ 2145f3ed1f5dSPeter Maydell const char *errmsg; 2146f3ed1f5dSPeter Maydell if (!have_guest_base && !reserved_va) { 2147f3ed1f5dSPeter Maydell unsigned long host_start, real_start, host_size; 2148f3ed1f5dSPeter Maydell 2149f3ed1f5dSPeter Maydell /* Round addresses to page boundaries. */ 2150f3ed1f5dSPeter Maydell loaddr &= qemu_host_page_mask; 2151f3ed1f5dSPeter Maydell hiaddr = HOST_PAGE_ALIGN(hiaddr); 2152f3ed1f5dSPeter Maydell 2153f3ed1f5dSPeter Maydell if (loaddr < mmap_min_addr) { 2154f3ed1f5dSPeter Maydell host_start = HOST_PAGE_ALIGN(mmap_min_addr); 2155f3ed1f5dSPeter Maydell } else { 2156f3ed1f5dSPeter Maydell host_start = loaddr; 2157f3ed1f5dSPeter Maydell if (host_start != loaddr) { 2158f3ed1f5dSPeter Maydell errmsg = "Address overflow loading ELF binary"; 2159f3ed1f5dSPeter Maydell goto exit_errmsg; 2160f3ed1f5dSPeter Maydell } 2161f3ed1f5dSPeter Maydell } 2162f3ed1f5dSPeter Maydell host_size = hiaddr - loaddr; 2163dce10401SMeador Inge 2164dce10401SMeador Inge /* Setup the initial guest memory space with ranges gleaned from 2165dce10401SMeador Inge * the ELF image that is being loaded. 2166dce10401SMeador Inge */ 2167dce10401SMeador Inge real_start = init_guest_space(host_start, host_size, loaddr, false); 2168f3ed1f5dSPeter Maydell if (real_start == (unsigned long)-1) { 2169f3ed1f5dSPeter Maydell errmsg = "Unable to find space for application"; 2170f3ed1f5dSPeter Maydell goto exit_errmsg; 2171f3ed1f5dSPeter Maydell } 2172dce10401SMeador Inge guest_base = real_start - loaddr; 2173dce10401SMeador Inge 217413829020SPaolo Bonzini qemu_log_mask(CPU_LOG_PAGE, "Relocating guest address space from 0x" 2175f3ed1f5dSPeter Maydell TARGET_ABI_FMT_lx " to 0x%lx\n", 2176f3ed1f5dSPeter Maydell loaddr, real_start); 2177f3ed1f5dSPeter Maydell } 2178f3ed1f5dSPeter Maydell return; 2179f3ed1f5dSPeter Maydell 2180f3ed1f5dSPeter Maydell exit_errmsg: 2181f3ed1f5dSPeter Maydell fprintf(stderr, "%s: %s\n", image_name, errmsg); 2182f3ed1f5dSPeter Maydell exit(-1); 2183f3ed1f5dSPeter Maydell } 2184f3ed1f5dSPeter Maydell 2185f3ed1f5dSPeter Maydell 21868e62a717SRichard Henderson /* Load an ELF image into the address space. 218731e31b8aSbellard 21888e62a717SRichard Henderson IMAGE_NAME is the filename of the image, to use in error messages. 21898e62a717SRichard Henderson IMAGE_FD is the open file descriptor for the image. 21908e62a717SRichard Henderson 21918e62a717SRichard Henderson BPRM_BUF is a copy of the beginning of the file; this of course 21928e62a717SRichard Henderson contains the elf file header at offset 0. It is assumed that this 21938e62a717SRichard Henderson buffer is sufficiently aligned to present no problems to the host 21948e62a717SRichard Henderson in accessing data at aligned offsets within the buffer. 21958e62a717SRichard Henderson 21968e62a717SRichard Henderson On return: INFO values will be filled in, as necessary or available. */ 21978e62a717SRichard Henderson 21988e62a717SRichard Henderson static void load_elf_image(const char *image_name, int image_fd, 2199bf858897SRichard Henderson struct image_info *info, char **pinterp_name, 22009955ffacSRichard Henderson char bprm_buf[BPRM_BUF_SIZE]) 220131e31b8aSbellard { 22028e62a717SRichard Henderson struct elfhdr *ehdr = (struct elfhdr *)bprm_buf; 22038e62a717SRichard Henderson struct elf_phdr *phdr; 22048e62a717SRichard Henderson abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 22058e62a717SRichard Henderson int i, retval; 22068e62a717SRichard Henderson const char *errmsg; 220731e31b8aSbellard 22088e62a717SRichard Henderson /* First of all, some simple consistency checks */ 22098e62a717SRichard Henderson errmsg = "Invalid ELF image for this architecture"; 22108e62a717SRichard Henderson if (!elf_check_ident(ehdr)) { 22118e62a717SRichard Henderson goto exit_errmsg; 22128e62a717SRichard Henderson } 22138e62a717SRichard Henderson bswap_ehdr(ehdr); 22148e62a717SRichard Henderson if (!elf_check_ehdr(ehdr)) { 22158e62a717SRichard Henderson goto exit_errmsg; 221631e31b8aSbellard } 221731e31b8aSbellard 22188e62a717SRichard Henderson i = ehdr->e_phnum * sizeof(struct elf_phdr); 22198e62a717SRichard Henderson if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) { 22208e62a717SRichard Henderson phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff); 22219955ffacSRichard Henderson } else { 22228e62a717SRichard Henderson phdr = (struct elf_phdr *) alloca(i); 22238e62a717SRichard Henderson retval = pread(image_fd, phdr, i, ehdr->e_phoff); 22249955ffacSRichard Henderson if (retval != i) { 22258e62a717SRichard Henderson goto exit_read; 22269955ffacSRichard Henderson } 222731e31b8aSbellard } 22288e62a717SRichard Henderson bswap_phdr(phdr, ehdr->e_phnum); 222909bfb054Sbellard 22301af02e83SMike Frysinger info->nsegs = 0; 22311af02e83SMike Frysinger info->pt_dynamic_addr = 0; 22321af02e83SMike Frysinger 223398c1076cSAlex Bennée mmap_lock(); 223498c1076cSAlex Bennée 2235682674b8SRichard Henderson /* Find the maximum size of the image and allocate an appropriate 2236682674b8SRichard Henderson amount of memory to handle that. */ 2237682674b8SRichard Henderson loaddr = -1, hiaddr = 0; 223833143c44SLaurent Vivier info->alignment = 0; 22398e62a717SRichard Henderson for (i = 0; i < ehdr->e_phnum; ++i) { 22408e62a717SRichard Henderson if (phdr[i].p_type == PT_LOAD) { 2241a93934feSJonas Maebe abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset; 2242682674b8SRichard Henderson if (a < loaddr) { 2243682674b8SRichard Henderson loaddr = a; 2244682674b8SRichard Henderson } 2245ccf661f8STom Musta a = phdr[i].p_vaddr + phdr[i].p_memsz; 2246682674b8SRichard Henderson if (a > hiaddr) { 2247682674b8SRichard Henderson hiaddr = a; 2248682674b8SRichard Henderson } 22491af02e83SMike Frysinger ++info->nsegs; 225033143c44SLaurent Vivier info->alignment |= phdr[i].p_align; 2251682674b8SRichard Henderson } 2252682674b8SRichard Henderson } 2253682674b8SRichard Henderson 2254682674b8SRichard Henderson load_addr = loaddr; 22558e62a717SRichard Henderson if (ehdr->e_type == ET_DYN) { 2256682674b8SRichard Henderson /* The image indicates that it can be loaded anywhere. Find a 2257682674b8SRichard Henderson location that can hold the memory space required. If the 2258682674b8SRichard Henderson image is pre-linked, LOADDR will be non-zero. Since we do 2259682674b8SRichard Henderson not supply MAP_FIXED here we'll use that address if and 2260682674b8SRichard Henderson only if it remains available. */ 2261682674b8SRichard Henderson load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE, 2262682674b8SRichard Henderson MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, 226309bfb054Sbellard -1, 0); 2264682674b8SRichard Henderson if (load_addr == -1) { 22658e62a717SRichard Henderson goto exit_perror; 226609bfb054Sbellard } 2267bf858897SRichard Henderson } else if (pinterp_name != NULL) { 2268bf858897SRichard Henderson /* This is the main executable. Make sure that the low 2269bf858897SRichard Henderson address does not conflict with MMAP_MIN_ADDR or the 2270bf858897SRichard Henderson QEMU application itself. */ 2271f3ed1f5dSPeter Maydell probe_guest_base(image_name, loaddr, hiaddr); 227209bfb054Sbellard } 2273682674b8SRichard Henderson load_bias = load_addr - loaddr; 227409bfb054Sbellard 2275a99856cdSChristophe Lyon if (elf_is_fdpic(ehdr)) { 22761af02e83SMike Frysinger struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 22777267c094SAnthony Liguori g_malloc(sizeof(*loadsegs) * info->nsegs); 22781af02e83SMike Frysinger 22791af02e83SMike Frysinger for (i = 0; i < ehdr->e_phnum; ++i) { 22801af02e83SMike Frysinger switch (phdr[i].p_type) { 22811af02e83SMike Frysinger case PT_DYNAMIC: 22821af02e83SMike Frysinger info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 22831af02e83SMike Frysinger break; 22841af02e83SMike Frysinger case PT_LOAD: 22851af02e83SMike Frysinger loadsegs->addr = phdr[i].p_vaddr + load_bias; 22861af02e83SMike Frysinger loadsegs->p_vaddr = phdr[i].p_vaddr; 22871af02e83SMike Frysinger loadsegs->p_memsz = phdr[i].p_memsz; 22881af02e83SMike Frysinger ++loadsegs; 22891af02e83SMike Frysinger break; 22901af02e83SMike Frysinger } 22911af02e83SMike Frysinger } 22921af02e83SMike Frysinger } 22931af02e83SMike Frysinger 22948e62a717SRichard Henderson info->load_bias = load_bias; 22958e62a717SRichard Henderson info->load_addr = load_addr; 22968e62a717SRichard Henderson info->entry = ehdr->e_entry + load_bias; 22978e62a717SRichard Henderson info->start_code = -1; 22988e62a717SRichard Henderson info->end_code = 0; 22998e62a717SRichard Henderson info->start_data = -1; 23008e62a717SRichard Henderson info->end_data = 0; 23018e62a717SRichard Henderson info->brk = 0; 2302d8fd2954SPaul Brook info->elf_flags = ehdr->e_flags; 23038e62a717SRichard Henderson 23048e62a717SRichard Henderson for (i = 0; i < ehdr->e_phnum; i++) { 23058e62a717SRichard Henderson struct elf_phdr *eppnt = phdr + i; 230631e31b8aSbellard if (eppnt->p_type == PT_LOAD) { 230794894ff2SShivaprasad G Bhat abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len; 230831e31b8aSbellard int elf_prot = 0; 230931e31b8aSbellard 231031e31b8aSbellard if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 231131e31b8aSbellard if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 231231e31b8aSbellard if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 231331e31b8aSbellard 2314682674b8SRichard Henderson vaddr = load_bias + eppnt->p_vaddr; 2315682674b8SRichard Henderson vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr); 2316682674b8SRichard Henderson vaddr_ps = TARGET_ELF_PAGESTART(vaddr); 231794894ff2SShivaprasad G Bhat vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po); 2318682674b8SRichard Henderson 231994894ff2SShivaprasad G Bhat error = target_mmap(vaddr_ps, vaddr_len, 2320682674b8SRichard Henderson elf_prot, MAP_PRIVATE | MAP_FIXED, 23218e62a717SRichard Henderson image_fd, eppnt->p_offset - vaddr_po); 2322e89f07d3Spbrook if (error == -1) { 23238e62a717SRichard Henderson goto exit_perror; 232431e31b8aSbellard } 232531e31b8aSbellard 2326682674b8SRichard Henderson vaddr_ef = vaddr + eppnt->p_filesz; 2327682674b8SRichard Henderson vaddr_em = vaddr + eppnt->p_memsz; 232831e31b8aSbellard 2329cf129f3aSRichard Henderson /* If the load segment requests extra zeros (e.g. bss), map it. */ 2330682674b8SRichard Henderson if (vaddr_ef < vaddr_em) { 2331682674b8SRichard Henderson zero_bss(vaddr_ef, vaddr_em, elf_prot); 2332682674b8SRichard Henderson } 23338e62a717SRichard Henderson 23348e62a717SRichard Henderson /* Find the full program boundaries. */ 23358e62a717SRichard Henderson if (elf_prot & PROT_EXEC) { 23368e62a717SRichard Henderson if (vaddr < info->start_code) { 23378e62a717SRichard Henderson info->start_code = vaddr; 2338cf129f3aSRichard Henderson } 23398e62a717SRichard Henderson if (vaddr_ef > info->end_code) { 23408e62a717SRichard Henderson info->end_code = vaddr_ef; 23418e62a717SRichard Henderson } 23428e62a717SRichard Henderson } 23438e62a717SRichard Henderson if (elf_prot & PROT_WRITE) { 23448e62a717SRichard Henderson if (vaddr < info->start_data) { 23458e62a717SRichard Henderson info->start_data = vaddr; 23468e62a717SRichard Henderson } 23478e62a717SRichard Henderson if (vaddr_ef > info->end_data) { 23488e62a717SRichard Henderson info->end_data = vaddr_ef; 23498e62a717SRichard Henderson } 23508e62a717SRichard Henderson if (vaddr_em > info->brk) { 23518e62a717SRichard Henderson info->brk = vaddr_em; 23528e62a717SRichard Henderson } 23538e62a717SRichard Henderson } 2354bf858897SRichard Henderson } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 2355bf858897SRichard Henderson char *interp_name; 2356bf858897SRichard Henderson 2357bf858897SRichard Henderson if (*pinterp_name) { 2358bf858897SRichard Henderson errmsg = "Multiple PT_INTERP entries"; 2359bf858897SRichard Henderson goto exit_errmsg; 2360bf858897SRichard Henderson } 2361bf858897SRichard Henderson interp_name = malloc(eppnt->p_filesz); 2362bf858897SRichard Henderson if (!interp_name) { 2363bf858897SRichard Henderson goto exit_perror; 2364bf858897SRichard Henderson } 2365bf858897SRichard Henderson 2366bf858897SRichard Henderson if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { 2367bf858897SRichard Henderson memcpy(interp_name, bprm_buf + eppnt->p_offset, 2368bf858897SRichard Henderson eppnt->p_filesz); 2369bf858897SRichard Henderson } else { 2370bf858897SRichard Henderson retval = pread(image_fd, interp_name, eppnt->p_filesz, 2371bf858897SRichard Henderson eppnt->p_offset); 2372bf858897SRichard Henderson if (retval != eppnt->p_filesz) { 2373bf858897SRichard Henderson goto exit_perror; 2374bf858897SRichard Henderson } 2375bf858897SRichard Henderson } 2376bf858897SRichard Henderson if (interp_name[eppnt->p_filesz - 1] != 0) { 2377bf858897SRichard Henderson errmsg = "Invalid PT_INTERP entry"; 2378bf858897SRichard Henderson goto exit_errmsg; 2379bf858897SRichard Henderson } 2380bf858897SRichard Henderson *pinterp_name = interp_name; 2381*5dd0db52SStefan Markovic #ifdef TARGET_MIPS 2382*5dd0db52SStefan Markovic } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 2383*5dd0db52SStefan Markovic Mips_elf_abiflags_v0 abiflags; 2384*5dd0db52SStefan Markovic if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) { 2385*5dd0db52SStefan Markovic errmsg = "Invalid PT_MIPS_ABIFLAGS entry"; 2386*5dd0db52SStefan Markovic goto exit_errmsg; 2387*5dd0db52SStefan Markovic } 2388*5dd0db52SStefan Markovic if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { 2389*5dd0db52SStefan Markovic memcpy(&abiflags, bprm_buf + eppnt->p_offset, 2390*5dd0db52SStefan Markovic sizeof(Mips_elf_abiflags_v0)); 2391*5dd0db52SStefan Markovic } else { 2392*5dd0db52SStefan Markovic retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0), 2393*5dd0db52SStefan Markovic eppnt->p_offset); 2394*5dd0db52SStefan Markovic if (retval != sizeof(Mips_elf_abiflags_v0)) { 2395*5dd0db52SStefan Markovic goto exit_perror; 2396*5dd0db52SStefan Markovic } 2397*5dd0db52SStefan Markovic } 2398*5dd0db52SStefan Markovic bswap_mips_abiflags(&abiflags); 2399*5dd0db52SStefan Markovic #endif 24008e62a717SRichard Henderson } 24018e62a717SRichard Henderson } 24028e62a717SRichard Henderson 24038e62a717SRichard Henderson if (info->end_data == 0) { 24048e62a717SRichard Henderson info->start_data = info->end_code; 24058e62a717SRichard Henderson info->end_data = info->end_code; 24068e62a717SRichard Henderson info->brk = info->end_code; 240731e31b8aSbellard } 240831e31b8aSbellard 2409682674b8SRichard Henderson if (qemu_log_enabled()) { 24108e62a717SRichard Henderson load_symbols(ehdr, image_fd, load_bias); 2411682674b8SRichard Henderson } 241231e31b8aSbellard 241398c1076cSAlex Bennée mmap_unlock(); 241498c1076cSAlex Bennée 24158e62a717SRichard Henderson close(image_fd); 24168e62a717SRichard Henderson return; 241731e31b8aSbellard 24188e62a717SRichard Henderson exit_read: 24198e62a717SRichard Henderson if (retval >= 0) { 24208e62a717SRichard Henderson errmsg = "Incomplete read of file header"; 24218e62a717SRichard Henderson goto exit_errmsg; 24228e62a717SRichard Henderson } 24238e62a717SRichard Henderson exit_perror: 24248e62a717SRichard Henderson errmsg = strerror(errno); 24258e62a717SRichard Henderson exit_errmsg: 24268e62a717SRichard Henderson fprintf(stderr, "%s: %s\n", image_name, errmsg); 24278e62a717SRichard Henderson exit(-1); 24288e62a717SRichard Henderson } 24298e62a717SRichard Henderson 24308e62a717SRichard Henderson static void load_elf_interp(const char *filename, struct image_info *info, 24318e62a717SRichard Henderson char bprm_buf[BPRM_BUF_SIZE]) 24328e62a717SRichard Henderson { 24338e62a717SRichard Henderson int fd, retval; 24348e62a717SRichard Henderson 24358e62a717SRichard Henderson fd = open(path(filename), O_RDONLY); 24368e62a717SRichard Henderson if (fd < 0) { 24378e62a717SRichard Henderson goto exit_perror; 24388e62a717SRichard Henderson } 24398e62a717SRichard Henderson 24408e62a717SRichard Henderson retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 24418e62a717SRichard Henderson if (retval < 0) { 24428e62a717SRichard Henderson goto exit_perror; 24438e62a717SRichard Henderson } 24448e62a717SRichard Henderson if (retval < BPRM_BUF_SIZE) { 24458e62a717SRichard Henderson memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval); 24468e62a717SRichard Henderson } 24478e62a717SRichard Henderson 2448bf858897SRichard Henderson load_elf_image(filename, fd, info, NULL, bprm_buf); 24498e62a717SRichard Henderson return; 24508e62a717SRichard Henderson 24518e62a717SRichard Henderson exit_perror: 24528e62a717SRichard Henderson fprintf(stderr, "%s: %s\n", filename, strerror(errno)); 24538e62a717SRichard Henderson exit(-1); 245431e31b8aSbellard } 245531e31b8aSbellard 245649918a75Spbrook static int symfind(const void *s0, const void *s1) 245749918a75Spbrook { 2458c7c530cdSStefan Weil target_ulong addr = *(target_ulong *)s0; 245949918a75Spbrook struct elf_sym *sym = (struct elf_sym *)s1; 246049918a75Spbrook int result = 0; 2461c7c530cdSStefan Weil if (addr < sym->st_value) { 246249918a75Spbrook result = -1; 2463c7c530cdSStefan Weil } else if (addr >= sym->st_value + sym->st_size) { 246449918a75Spbrook result = 1; 246549918a75Spbrook } 246649918a75Spbrook return result; 246749918a75Spbrook } 246849918a75Spbrook 246949918a75Spbrook static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr) 247049918a75Spbrook { 247149918a75Spbrook #if ELF_CLASS == ELFCLASS32 247249918a75Spbrook struct elf_sym *syms = s->disas_symtab.elf32; 247349918a75Spbrook #else 247449918a75Spbrook struct elf_sym *syms = s->disas_symtab.elf64; 247549918a75Spbrook #endif 247649918a75Spbrook 247749918a75Spbrook // binary search 247849918a75Spbrook struct elf_sym *sym; 247949918a75Spbrook 2480c7c530cdSStefan Weil sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 24817cba04f6SBlue Swirl if (sym != NULL) { 248249918a75Spbrook return s->disas_strtab + sym->st_name; 248349918a75Spbrook } 248449918a75Spbrook 248549918a75Spbrook return ""; 248649918a75Spbrook } 248749918a75Spbrook 248849918a75Spbrook /* FIXME: This should use elf_ops.h */ 248949918a75Spbrook static int symcmp(const void *s0, const void *s1) 249049918a75Spbrook { 249149918a75Spbrook struct elf_sym *sym0 = (struct elf_sym *)s0; 249249918a75Spbrook struct elf_sym *sym1 = (struct elf_sym *)s1; 249349918a75Spbrook return (sym0->st_value < sym1->st_value) 249449918a75Spbrook ? -1 249549918a75Spbrook : ((sym0->st_value > sym1->st_value) ? 1 : 0); 249649918a75Spbrook } 249749918a75Spbrook 2498689f936fSbellard /* Best attempt to load symbols from this ELF object. */ 2499682674b8SRichard Henderson static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) 2500689f936fSbellard { 2501682674b8SRichard Henderson int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 25021e06262dSPeter Maydell uint64_t segsz; 2503682674b8SRichard Henderson struct elf_shdr *shdr; 2504b9475279SCédric VINCENT char *strings = NULL; 2505b9475279SCédric VINCENT struct syminfo *s = NULL; 2506b9475279SCédric VINCENT struct elf_sym *new_syms, *syms = NULL; 250731e31b8aSbellard 2508682674b8SRichard Henderson shnum = hdr->e_shnum; 2509682674b8SRichard Henderson i = shnum * sizeof(struct elf_shdr); 2510682674b8SRichard Henderson shdr = (struct elf_shdr *)alloca(i); 2511682674b8SRichard Henderson if (pread(fd, shdr, i, hdr->e_shoff) != i) { 2512689f936fSbellard return; 2513682674b8SRichard Henderson } 2514682674b8SRichard Henderson 2515682674b8SRichard Henderson bswap_shdr(shdr, shnum); 2516682674b8SRichard Henderson for (i = 0; i < shnum; ++i) { 2517682674b8SRichard Henderson if (shdr[i].sh_type == SHT_SYMTAB) { 2518682674b8SRichard Henderson sym_idx = i; 2519682674b8SRichard Henderson str_idx = shdr[i].sh_link; 2520689f936fSbellard goto found; 2521689f936fSbellard } 2522689f936fSbellard } 2523682674b8SRichard Henderson 2524682674b8SRichard Henderson /* There will be no symbol table if the file was stripped. */ 2525682674b8SRichard Henderson return; 2526689f936fSbellard 2527689f936fSbellard found: 2528689f936fSbellard /* Now know where the strtab and symtab are. Snarf them. */ 25290ef9ea29SPeter Maydell s = g_try_new(struct syminfo, 1); 2530682674b8SRichard Henderson if (!s) { 2531b9475279SCédric VINCENT goto give_up; 2532682674b8SRichard Henderson } 2533682674b8SRichard Henderson 25341e06262dSPeter Maydell segsz = shdr[str_idx].sh_size; 25351e06262dSPeter Maydell s->disas_strtab = strings = g_try_malloc(segsz); 25361e06262dSPeter Maydell if (!strings || 25371e06262dSPeter Maydell pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) { 2538b9475279SCédric VINCENT goto give_up; 2539682674b8SRichard Henderson } 2540689f936fSbellard 25411e06262dSPeter Maydell segsz = shdr[sym_idx].sh_size; 25421e06262dSPeter Maydell syms = g_try_malloc(segsz); 25431e06262dSPeter Maydell if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) { 2544b9475279SCédric VINCENT goto give_up; 2545682674b8SRichard Henderson } 2546689f936fSbellard 25471e06262dSPeter Maydell if (segsz / sizeof(struct elf_sym) > INT_MAX) { 25481e06262dSPeter Maydell /* Implausibly large symbol table: give up rather than ploughing 25491e06262dSPeter Maydell * on with the number of symbols calculation overflowing 25501e06262dSPeter Maydell */ 25511e06262dSPeter Maydell goto give_up; 25521e06262dSPeter Maydell } 25531e06262dSPeter Maydell nsyms = segsz / sizeof(struct elf_sym); 2554682674b8SRichard Henderson for (i = 0; i < nsyms; ) { 255549918a75Spbrook bswap_sym(syms + i); 2556682674b8SRichard Henderson /* Throw away entries which we do not need. */ 2557682674b8SRichard Henderson if (syms[i].st_shndx == SHN_UNDEF 2558682674b8SRichard Henderson || syms[i].st_shndx >= SHN_LORESERVE 2559682674b8SRichard Henderson || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 2560682674b8SRichard Henderson if (i < --nsyms) { 256149918a75Spbrook syms[i] = syms[nsyms]; 256249918a75Spbrook } 2563682674b8SRichard Henderson } else { 256449918a75Spbrook #if defined(TARGET_ARM) || defined (TARGET_MIPS) 256549918a75Spbrook /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 256649918a75Spbrook syms[i].st_value &= ~(target_ulong)1; 256749918a75Spbrook #endif 2568682674b8SRichard Henderson syms[i].st_value += load_bias; 256949918a75Spbrook i++; 257049918a75Spbrook } 2571682674b8SRichard Henderson } 257249918a75Spbrook 2573b9475279SCédric VINCENT /* No "useful" symbol. */ 2574b9475279SCédric VINCENT if (nsyms == 0) { 2575b9475279SCédric VINCENT goto give_up; 2576b9475279SCédric VINCENT } 2577b9475279SCédric VINCENT 25785d5c9930SRichard Henderson /* Attempt to free the storage associated with the local symbols 25795d5c9930SRichard Henderson that we threw away. Whether or not this has any effect on the 25805d5c9930SRichard Henderson memory allocation depends on the malloc implementation and how 25815d5c9930SRichard Henderson many symbols we managed to discard. */ 25820ef9ea29SPeter Maydell new_syms = g_try_renew(struct elf_sym, syms, nsyms); 25838d79de6eSStefan Weil if (new_syms == NULL) { 2584b9475279SCédric VINCENT goto give_up; 25855d5c9930SRichard Henderson } 25868d79de6eSStefan Weil syms = new_syms; 25875d5c9930SRichard Henderson 258849918a75Spbrook qsort(syms, nsyms, sizeof(*syms), symcmp); 258949918a75Spbrook 259049918a75Spbrook s->disas_num_syms = nsyms; 259149918a75Spbrook #if ELF_CLASS == ELFCLASS32 259249918a75Spbrook s->disas_symtab.elf32 = syms; 259349918a75Spbrook #else 259449918a75Spbrook s->disas_symtab.elf64 = syms; 259549918a75Spbrook #endif 2596682674b8SRichard Henderson s->lookup_symbol = lookup_symbolxx; 2597e80cfcfcSbellard s->next = syminfos; 2598e80cfcfcSbellard syminfos = s; 2599b9475279SCédric VINCENT 2600b9475279SCédric VINCENT return; 2601b9475279SCédric VINCENT 2602b9475279SCédric VINCENT give_up: 26030ef9ea29SPeter Maydell g_free(s); 26040ef9ea29SPeter Maydell g_free(strings); 26050ef9ea29SPeter Maydell g_free(syms); 2606689f936fSbellard } 260731e31b8aSbellard 2608768fe76eSYunQiang Su uint32_t get_elf_eflags(int fd) 2609768fe76eSYunQiang Su { 2610768fe76eSYunQiang Su struct elfhdr ehdr; 2611768fe76eSYunQiang Su off_t offset; 2612768fe76eSYunQiang Su int ret; 2613768fe76eSYunQiang Su 2614768fe76eSYunQiang Su /* Read ELF header */ 2615768fe76eSYunQiang Su offset = lseek(fd, 0, SEEK_SET); 2616768fe76eSYunQiang Su if (offset == (off_t) -1) { 2617768fe76eSYunQiang Su return 0; 2618768fe76eSYunQiang Su } 2619768fe76eSYunQiang Su ret = read(fd, &ehdr, sizeof(ehdr)); 2620768fe76eSYunQiang Su if (ret < sizeof(ehdr)) { 2621768fe76eSYunQiang Su return 0; 2622768fe76eSYunQiang Su } 2623768fe76eSYunQiang Su offset = lseek(fd, offset, SEEK_SET); 2624768fe76eSYunQiang Su if (offset == (off_t) -1) { 2625768fe76eSYunQiang Su return 0; 2626768fe76eSYunQiang Su } 2627768fe76eSYunQiang Su 2628768fe76eSYunQiang Su /* Check ELF signature */ 2629768fe76eSYunQiang Su if (!elf_check_ident(&ehdr)) { 2630768fe76eSYunQiang Su return 0; 2631768fe76eSYunQiang Su } 2632768fe76eSYunQiang Su 2633768fe76eSYunQiang Su /* check header */ 2634768fe76eSYunQiang Su bswap_ehdr(&ehdr); 2635768fe76eSYunQiang Su if (!elf_check_ehdr(&ehdr)) { 2636768fe76eSYunQiang Su return 0; 2637768fe76eSYunQiang Su } 2638768fe76eSYunQiang Su 2639768fe76eSYunQiang Su /* return architecture id */ 2640768fe76eSYunQiang Su return ehdr.e_flags; 2641768fe76eSYunQiang Su } 2642768fe76eSYunQiang Su 2643f0116c54SWill Newton int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 264431e31b8aSbellard { 26458e62a717SRichard Henderson struct image_info interp_info; 264631e31b8aSbellard struct elfhdr elf_ex; 26478e62a717SRichard Henderson char *elf_interpreter = NULL; 264859baae9aSStefan Brüns char *scratch; 264931e31b8aSbellard 2650bf858897SRichard Henderson info->start_mmap = (abi_ulong)ELF_START_MMAP; 265131e31b8aSbellard 2652bf858897SRichard Henderson load_elf_image(bprm->filename, bprm->fd, info, 2653bf858897SRichard Henderson &elf_interpreter, bprm->buf); 2654bf858897SRichard Henderson 2655bf858897SRichard Henderson /* ??? We need a copy of the elf header for passing to create_elf_tables. 2656bf858897SRichard Henderson If we do nothing, we'll have overwritten this when we re-use bprm->buf 2657bf858897SRichard Henderson when we load the interpreter. */ 2658bf858897SRichard Henderson elf_ex = *(struct elfhdr *)bprm->buf; 265931e31b8aSbellard 266059baae9aSStefan Brüns /* Do this so that we can load the interpreter, if need be. We will 266159baae9aSStefan Brüns change some of these later */ 266259baae9aSStefan Brüns bprm->p = setup_arg_pages(bprm, info); 266359baae9aSStefan Brüns 266459baae9aSStefan Brüns scratch = g_new0(char, TARGET_PAGE_SIZE); 26657c4ee5bcSRichard Henderson if (STACK_GROWS_DOWN) { 266659baae9aSStefan Brüns bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 266759baae9aSStefan Brüns bprm->p, info->stack_limit); 26687c4ee5bcSRichard Henderson info->file_string = bprm->p; 266959baae9aSStefan Brüns bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 267059baae9aSStefan Brüns bprm->p, info->stack_limit); 26717c4ee5bcSRichard Henderson info->env_strings = bprm->p; 267259baae9aSStefan Brüns bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 267359baae9aSStefan Brüns bprm->p, info->stack_limit); 26747c4ee5bcSRichard Henderson info->arg_strings = bprm->p; 26757c4ee5bcSRichard Henderson } else { 26767c4ee5bcSRichard Henderson info->arg_strings = bprm->p; 26777c4ee5bcSRichard Henderson bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 26787c4ee5bcSRichard Henderson bprm->p, info->stack_limit); 26797c4ee5bcSRichard Henderson info->env_strings = bprm->p; 26807c4ee5bcSRichard Henderson bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 26817c4ee5bcSRichard Henderson bprm->p, info->stack_limit); 26827c4ee5bcSRichard Henderson info->file_string = bprm->p; 26837c4ee5bcSRichard Henderson bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 26847c4ee5bcSRichard Henderson bprm->p, info->stack_limit); 26857c4ee5bcSRichard Henderson } 26867c4ee5bcSRichard Henderson 268759baae9aSStefan Brüns g_free(scratch); 268859baae9aSStefan Brüns 2689e5fe0c52Spbrook if (!bprm->p) { 2690bf858897SRichard Henderson fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG)); 269131e31b8aSbellard exit(-1); 26929955ffacSRichard Henderson } 2693379f6698SPaul Brook 26948e62a717SRichard Henderson if (elf_interpreter) { 26958e62a717SRichard Henderson load_elf_interp(elf_interpreter, &interp_info, bprm->buf); 269631e31b8aSbellard 26978e62a717SRichard Henderson /* If the program interpreter is one of these two, then assume 26988e62a717SRichard Henderson an iBCS2 image. Otherwise assume a native linux image. */ 269931e31b8aSbellard 27008e62a717SRichard Henderson if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 27018e62a717SRichard Henderson || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) { 27028e62a717SRichard Henderson info->personality = PER_SVR4; 27038e62a717SRichard Henderson 270431e31b8aSbellard /* Why this, you ask??? Well SVr4 maps page 0 as read-only, 27058e62a717SRichard Henderson and some applications "depend" upon this behavior. Since 27068e62a717SRichard Henderson we do not have the power to recompile these, we emulate 27078e62a717SRichard Henderson the SVr4 behavior. Sigh. */ 27088e62a717SRichard Henderson target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC, 270968754b44SPeter Maydell MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 271031e31b8aSbellard } 27118e62a717SRichard Henderson } 271231e31b8aSbellard 27138e62a717SRichard Henderson bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex, 27148e62a717SRichard Henderson info, (elf_interpreter ? &interp_info : NULL)); 27158e62a717SRichard Henderson info->start_stack = bprm->p; 27168e62a717SRichard Henderson 27178e62a717SRichard Henderson /* If we have an interpreter, set that as the program's entry point. 27188e78064eSRichard Henderson Copy the load_bias as well, to help PPC64 interpret the entry 27198e62a717SRichard Henderson point as a function descriptor. Do this after creating elf tables 27208e62a717SRichard Henderson so that we copy the original program entry point into the AUXV. */ 27218e62a717SRichard Henderson if (elf_interpreter) { 27228e78064eSRichard Henderson info->load_bias = interp_info.load_bias; 27238e62a717SRichard Henderson info->entry = interp_info.entry; 2724bf858897SRichard Henderson free(elf_interpreter); 27258e62a717SRichard Henderson } 272631e31b8aSbellard 2727edf8e2afSMika Westerberg #ifdef USE_ELF_CORE_DUMP 2728edf8e2afSMika Westerberg bprm->core_dump = &elf_core_dump; 2729edf8e2afSMika Westerberg #endif 2730edf8e2afSMika Westerberg 273131e31b8aSbellard return 0; 273231e31b8aSbellard } 273331e31b8aSbellard 2734edf8e2afSMika Westerberg #ifdef USE_ELF_CORE_DUMP 2735edf8e2afSMika Westerberg /* 2736edf8e2afSMika Westerberg * Definitions to generate Intel SVR4-like core files. 2737a2547a13SLaurent Desnogues * These mostly have the same names as the SVR4 types with "target_elf_" 2738edf8e2afSMika Westerberg * tacked on the front to prevent clashes with linux definitions, 2739edf8e2afSMika Westerberg * and the typedef forms have been avoided. This is mostly like 2740edf8e2afSMika Westerberg * the SVR4 structure, but more Linuxy, with things that Linux does 2741edf8e2afSMika Westerberg * not support and which gdb doesn't really use excluded. 2742edf8e2afSMika Westerberg * 2743edf8e2afSMika Westerberg * Fields we don't dump (their contents is zero) in linux-user qemu 2744edf8e2afSMika Westerberg * are marked with XXX. 2745edf8e2afSMika Westerberg * 2746edf8e2afSMika Westerberg * Core dump code is copied from linux kernel (fs/binfmt_elf.c). 2747edf8e2afSMika Westerberg * 2748edf8e2afSMika Westerberg * Porting ELF coredump for target is (quite) simple process. First you 2749dd0a3651SNathan Froyd * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for 2750edf8e2afSMika Westerberg * the target resides): 2751edf8e2afSMika Westerberg * 2752edf8e2afSMika Westerberg * #define USE_ELF_CORE_DUMP 2753edf8e2afSMika Westerberg * 2754edf8e2afSMika Westerberg * Next you define type of register set used for dumping. ELF specification 2755edf8e2afSMika Westerberg * says that it needs to be array of elf_greg_t that has size of ELF_NREG. 2756edf8e2afSMika Westerberg * 2757c227f099SAnthony Liguori * typedef <target_regtype> target_elf_greg_t; 2758edf8e2afSMika Westerberg * #define ELF_NREG <number of registers> 2759c227f099SAnthony Liguori * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG]; 2760edf8e2afSMika Westerberg * 2761edf8e2afSMika Westerberg * Last step is to implement target specific function that copies registers 2762edf8e2afSMika Westerberg * from given cpu into just specified register set. Prototype is: 2763edf8e2afSMika Westerberg * 2764c227f099SAnthony Liguori * static void elf_core_copy_regs(taret_elf_gregset_t *regs, 27659349b4f9SAndreas Färber * const CPUArchState *env); 2766edf8e2afSMika Westerberg * 2767edf8e2afSMika Westerberg * Parameters: 2768edf8e2afSMika Westerberg * regs - copy register values into here (allocated and zeroed by caller) 2769edf8e2afSMika Westerberg * env - copy registers from here 2770edf8e2afSMika Westerberg * 2771edf8e2afSMika Westerberg * Example for ARM target is provided in this file. 2772edf8e2afSMika Westerberg */ 2773edf8e2afSMika Westerberg 2774edf8e2afSMika Westerberg /* An ELF note in memory */ 2775edf8e2afSMika Westerberg struct memelfnote { 2776edf8e2afSMika Westerberg const char *name; 2777edf8e2afSMika Westerberg size_t namesz; 2778edf8e2afSMika Westerberg size_t namesz_rounded; 2779edf8e2afSMika Westerberg int type; 2780edf8e2afSMika Westerberg size_t datasz; 278180f5ce75SLaurent Vivier size_t datasz_rounded; 2782edf8e2afSMika Westerberg void *data; 2783edf8e2afSMika Westerberg size_t notesz; 2784edf8e2afSMika Westerberg }; 2785edf8e2afSMika Westerberg 2786a2547a13SLaurent Desnogues struct target_elf_siginfo { 2787f8fd4fc4SPaolo Bonzini abi_int si_signo; /* signal number */ 2788f8fd4fc4SPaolo Bonzini abi_int si_code; /* extra code */ 2789f8fd4fc4SPaolo Bonzini abi_int si_errno; /* errno */ 2790edf8e2afSMika Westerberg }; 2791edf8e2afSMika Westerberg 2792a2547a13SLaurent Desnogues struct target_elf_prstatus { 2793a2547a13SLaurent Desnogues struct target_elf_siginfo pr_info; /* Info associated with signal */ 27941ddd592fSPaolo Bonzini abi_short pr_cursig; /* Current signal */ 2795ca98ac83SPaolo Bonzini abi_ulong pr_sigpend; /* XXX */ 2796ca98ac83SPaolo Bonzini abi_ulong pr_sighold; /* XXX */ 2797c227f099SAnthony Liguori target_pid_t pr_pid; 2798c227f099SAnthony Liguori target_pid_t pr_ppid; 2799c227f099SAnthony Liguori target_pid_t pr_pgrp; 2800c227f099SAnthony Liguori target_pid_t pr_sid; 2801edf8e2afSMika Westerberg struct target_timeval pr_utime; /* XXX User time */ 2802edf8e2afSMika Westerberg struct target_timeval pr_stime; /* XXX System time */ 2803edf8e2afSMika Westerberg struct target_timeval pr_cutime; /* XXX Cumulative user time */ 2804edf8e2afSMika Westerberg struct target_timeval pr_cstime; /* XXX Cumulative system time */ 2805c227f099SAnthony Liguori target_elf_gregset_t pr_reg; /* GP registers */ 2806f8fd4fc4SPaolo Bonzini abi_int pr_fpvalid; /* XXX */ 2807edf8e2afSMika Westerberg }; 2808edf8e2afSMika Westerberg 2809edf8e2afSMika Westerberg #define ELF_PRARGSZ (80) /* Number of chars for args */ 2810edf8e2afSMika Westerberg 2811a2547a13SLaurent Desnogues struct target_elf_prpsinfo { 2812edf8e2afSMika Westerberg char pr_state; /* numeric process state */ 2813edf8e2afSMika Westerberg char pr_sname; /* char for pr_state */ 2814edf8e2afSMika Westerberg char pr_zomb; /* zombie */ 2815edf8e2afSMika Westerberg char pr_nice; /* nice val */ 2816ca98ac83SPaolo Bonzini abi_ulong pr_flag; /* flags */ 2817c227f099SAnthony Liguori target_uid_t pr_uid; 2818c227f099SAnthony Liguori target_gid_t pr_gid; 2819c227f099SAnthony Liguori target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; 2820edf8e2afSMika Westerberg /* Lots missing */ 2821edf8e2afSMika Westerberg char pr_fname[16]; /* filename of executable */ 2822edf8e2afSMika Westerberg char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ 2823edf8e2afSMika Westerberg }; 2824edf8e2afSMika Westerberg 2825edf8e2afSMika Westerberg /* Here is the structure in which status of each thread is captured. */ 2826edf8e2afSMika Westerberg struct elf_thread_status { 282772cf2d4fSBlue Swirl QTAILQ_ENTRY(elf_thread_status) ets_link; 2828a2547a13SLaurent Desnogues struct target_elf_prstatus prstatus; /* NT_PRSTATUS */ 2829edf8e2afSMika Westerberg #if 0 2830edf8e2afSMika Westerberg elf_fpregset_t fpu; /* NT_PRFPREG */ 2831edf8e2afSMika Westerberg struct task_struct *thread; 2832edf8e2afSMika Westerberg elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ 2833edf8e2afSMika Westerberg #endif 2834edf8e2afSMika Westerberg struct memelfnote notes[1]; 2835edf8e2afSMika Westerberg int num_notes; 2836edf8e2afSMika Westerberg }; 2837edf8e2afSMika Westerberg 2838edf8e2afSMika Westerberg struct elf_note_info { 2839edf8e2afSMika Westerberg struct memelfnote *notes; 2840a2547a13SLaurent Desnogues struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */ 2841a2547a13SLaurent Desnogues struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */ 2842edf8e2afSMika Westerberg 284372cf2d4fSBlue Swirl QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list; 2844edf8e2afSMika Westerberg #if 0 2845edf8e2afSMika Westerberg /* 2846edf8e2afSMika Westerberg * Current version of ELF coredump doesn't support 2847edf8e2afSMika Westerberg * dumping fp regs etc. 2848edf8e2afSMika Westerberg */ 2849edf8e2afSMika Westerberg elf_fpregset_t *fpu; 2850edf8e2afSMika Westerberg elf_fpxregset_t *xfpu; 2851edf8e2afSMika Westerberg int thread_status_size; 2852edf8e2afSMika Westerberg #endif 2853edf8e2afSMika Westerberg int notes_size; 2854edf8e2afSMika Westerberg int numnote; 2855edf8e2afSMika Westerberg }; 2856edf8e2afSMika Westerberg 2857edf8e2afSMika Westerberg struct vm_area_struct { 28581a1c4db9SMikhail Ilyin target_ulong vma_start; /* start vaddr of memory region */ 28591a1c4db9SMikhail Ilyin target_ulong vma_end; /* end vaddr of memory region */ 2860edf8e2afSMika Westerberg abi_ulong vma_flags; /* protection etc. flags for the region */ 286172cf2d4fSBlue Swirl QTAILQ_ENTRY(vm_area_struct) vma_link; 2862edf8e2afSMika Westerberg }; 2863edf8e2afSMika Westerberg 2864edf8e2afSMika Westerberg struct mm_struct { 286572cf2d4fSBlue Swirl QTAILQ_HEAD(, vm_area_struct) mm_mmap; 2866edf8e2afSMika Westerberg int mm_count; /* number of mappings */ 2867edf8e2afSMika Westerberg }; 2868edf8e2afSMika Westerberg 2869edf8e2afSMika Westerberg static struct mm_struct *vma_init(void); 2870edf8e2afSMika Westerberg static void vma_delete(struct mm_struct *); 28711a1c4db9SMikhail Ilyin static int vma_add_mapping(struct mm_struct *, target_ulong, 28721a1c4db9SMikhail Ilyin target_ulong, abi_ulong); 2873edf8e2afSMika Westerberg static int vma_get_mapping_count(const struct mm_struct *); 2874edf8e2afSMika Westerberg static struct vm_area_struct *vma_first(const struct mm_struct *); 2875edf8e2afSMika Westerberg static struct vm_area_struct *vma_next(struct vm_area_struct *); 2876edf8e2afSMika Westerberg static abi_ulong vma_dump_size(const struct vm_area_struct *); 28771a1c4db9SMikhail Ilyin static int vma_walker(void *priv, target_ulong start, target_ulong end, 2878edf8e2afSMika Westerberg unsigned long flags); 2879edf8e2afSMika Westerberg 2880edf8e2afSMika Westerberg static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t); 2881edf8e2afSMika Westerberg static void fill_note(struct memelfnote *, const char *, int, 2882edf8e2afSMika Westerberg unsigned int, void *); 2883a2547a13SLaurent Desnogues static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int); 2884a2547a13SLaurent Desnogues static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *); 2885edf8e2afSMika Westerberg static void fill_auxv_note(struct memelfnote *, const TaskState *); 2886edf8e2afSMika Westerberg static void fill_elf_note_phdr(struct elf_phdr *, int, off_t); 2887edf8e2afSMika Westerberg static size_t note_size(const struct memelfnote *); 2888edf8e2afSMika Westerberg static void free_note_info(struct elf_note_info *); 28899349b4f9SAndreas Färber static int fill_note_info(struct elf_note_info *, long, const CPUArchState *); 28909349b4f9SAndreas Färber static void fill_thread_info(struct elf_note_info *, const CPUArchState *); 2891edf8e2afSMika Westerberg static int core_dump_filename(const TaskState *, char *, size_t); 2892edf8e2afSMika Westerberg 2893edf8e2afSMika Westerberg static int dump_write(int, const void *, size_t); 2894edf8e2afSMika Westerberg static int write_note(struct memelfnote *, int); 2895edf8e2afSMika Westerberg static int write_note_info(struct elf_note_info *, int); 2896edf8e2afSMika Westerberg 2897edf8e2afSMika Westerberg #ifdef BSWAP_NEEDED 2898a2547a13SLaurent Desnogues static void bswap_prstatus(struct target_elf_prstatus *prstatus) 2899edf8e2afSMika Westerberg { 2900ca98ac83SPaolo Bonzini prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo); 2901ca98ac83SPaolo Bonzini prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code); 2902ca98ac83SPaolo Bonzini prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno); 2903edf8e2afSMika Westerberg prstatus->pr_cursig = tswap16(prstatus->pr_cursig); 2904ca98ac83SPaolo Bonzini prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend); 2905ca98ac83SPaolo Bonzini prstatus->pr_sighold = tswapal(prstatus->pr_sighold); 2906edf8e2afSMika Westerberg prstatus->pr_pid = tswap32(prstatus->pr_pid); 2907edf8e2afSMika Westerberg prstatus->pr_ppid = tswap32(prstatus->pr_ppid); 2908edf8e2afSMika Westerberg prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp); 2909edf8e2afSMika Westerberg prstatus->pr_sid = tswap32(prstatus->pr_sid); 2910edf8e2afSMika Westerberg /* cpu times are not filled, so we skip them */ 2911edf8e2afSMika Westerberg /* regs should be in correct format already */ 2912edf8e2afSMika Westerberg prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid); 2913edf8e2afSMika Westerberg } 2914edf8e2afSMika Westerberg 2915a2547a13SLaurent Desnogues static void bswap_psinfo(struct target_elf_prpsinfo *psinfo) 2916edf8e2afSMika Westerberg { 2917ca98ac83SPaolo Bonzini psinfo->pr_flag = tswapal(psinfo->pr_flag); 2918edf8e2afSMika Westerberg psinfo->pr_uid = tswap16(psinfo->pr_uid); 2919edf8e2afSMika Westerberg psinfo->pr_gid = tswap16(psinfo->pr_gid); 2920edf8e2afSMika Westerberg psinfo->pr_pid = tswap32(psinfo->pr_pid); 2921edf8e2afSMika Westerberg psinfo->pr_ppid = tswap32(psinfo->pr_ppid); 2922edf8e2afSMika Westerberg psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp); 2923edf8e2afSMika Westerberg psinfo->pr_sid = tswap32(psinfo->pr_sid); 2924edf8e2afSMika Westerberg } 2925991f8f0cSRichard Henderson 2926991f8f0cSRichard Henderson static void bswap_note(struct elf_note *en) 2927991f8f0cSRichard Henderson { 2928991f8f0cSRichard Henderson bswap32s(&en->n_namesz); 2929991f8f0cSRichard Henderson bswap32s(&en->n_descsz); 2930991f8f0cSRichard Henderson bswap32s(&en->n_type); 2931991f8f0cSRichard Henderson } 2932991f8f0cSRichard Henderson #else 2933991f8f0cSRichard Henderson static inline void bswap_prstatus(struct target_elf_prstatus *p) { } 2934991f8f0cSRichard Henderson static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} 2935991f8f0cSRichard Henderson static inline void bswap_note(struct elf_note *en) { } 2936edf8e2afSMika Westerberg #endif /* BSWAP_NEEDED */ 2937edf8e2afSMika Westerberg 2938edf8e2afSMika Westerberg /* 2939edf8e2afSMika Westerberg * Minimal support for linux memory regions. These are needed 2940edf8e2afSMika Westerberg * when we are finding out what memory exactly belongs to 2941edf8e2afSMika Westerberg * emulated process. No locks needed here, as long as 2942edf8e2afSMika Westerberg * thread that received the signal is stopped. 2943edf8e2afSMika Westerberg */ 2944edf8e2afSMika Westerberg 2945edf8e2afSMika Westerberg static struct mm_struct *vma_init(void) 2946edf8e2afSMika Westerberg { 2947edf8e2afSMika Westerberg struct mm_struct *mm; 2948edf8e2afSMika Westerberg 29497267c094SAnthony Liguori if ((mm = g_malloc(sizeof (*mm))) == NULL) 2950edf8e2afSMika Westerberg return (NULL); 2951edf8e2afSMika Westerberg 2952edf8e2afSMika Westerberg mm->mm_count = 0; 295372cf2d4fSBlue Swirl QTAILQ_INIT(&mm->mm_mmap); 2954edf8e2afSMika Westerberg 2955edf8e2afSMika Westerberg return (mm); 2956edf8e2afSMika Westerberg } 2957edf8e2afSMika Westerberg 2958edf8e2afSMika Westerberg static void vma_delete(struct mm_struct *mm) 2959edf8e2afSMika Westerberg { 2960edf8e2afSMika Westerberg struct vm_area_struct *vma; 2961edf8e2afSMika Westerberg 2962edf8e2afSMika Westerberg while ((vma = vma_first(mm)) != NULL) { 296372cf2d4fSBlue Swirl QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link); 29647267c094SAnthony Liguori g_free(vma); 2965edf8e2afSMika Westerberg } 29667267c094SAnthony Liguori g_free(mm); 2967edf8e2afSMika Westerberg } 2968edf8e2afSMika Westerberg 29691a1c4db9SMikhail Ilyin static int vma_add_mapping(struct mm_struct *mm, target_ulong start, 29701a1c4db9SMikhail Ilyin target_ulong end, abi_ulong flags) 2971edf8e2afSMika Westerberg { 2972edf8e2afSMika Westerberg struct vm_area_struct *vma; 2973edf8e2afSMika Westerberg 29747267c094SAnthony Liguori if ((vma = g_malloc0(sizeof (*vma))) == NULL) 2975edf8e2afSMika Westerberg return (-1); 2976edf8e2afSMika Westerberg 2977edf8e2afSMika Westerberg vma->vma_start = start; 2978edf8e2afSMika Westerberg vma->vma_end = end; 2979edf8e2afSMika Westerberg vma->vma_flags = flags; 2980edf8e2afSMika Westerberg 298172cf2d4fSBlue Swirl QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link); 2982edf8e2afSMika Westerberg mm->mm_count++; 2983edf8e2afSMika Westerberg 2984edf8e2afSMika Westerberg return (0); 2985edf8e2afSMika Westerberg } 2986edf8e2afSMika Westerberg 2987edf8e2afSMika Westerberg static struct vm_area_struct *vma_first(const struct mm_struct *mm) 2988edf8e2afSMika Westerberg { 298972cf2d4fSBlue Swirl return (QTAILQ_FIRST(&mm->mm_mmap)); 2990edf8e2afSMika Westerberg } 2991edf8e2afSMika Westerberg 2992edf8e2afSMika Westerberg static struct vm_area_struct *vma_next(struct vm_area_struct *vma) 2993edf8e2afSMika Westerberg { 299472cf2d4fSBlue Swirl return (QTAILQ_NEXT(vma, vma_link)); 2995edf8e2afSMika Westerberg } 2996edf8e2afSMika Westerberg 2997edf8e2afSMika Westerberg static int vma_get_mapping_count(const struct mm_struct *mm) 2998edf8e2afSMika Westerberg { 2999edf8e2afSMika Westerberg return (mm->mm_count); 3000edf8e2afSMika Westerberg } 3001edf8e2afSMika Westerberg 3002edf8e2afSMika Westerberg /* 3003edf8e2afSMika Westerberg * Calculate file (dump) size of given memory region. 3004edf8e2afSMika Westerberg */ 3005edf8e2afSMika Westerberg static abi_ulong vma_dump_size(const struct vm_area_struct *vma) 3006edf8e2afSMika Westerberg { 3007edf8e2afSMika Westerberg /* if we cannot even read the first page, skip it */ 3008edf8e2afSMika Westerberg if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE)) 3009edf8e2afSMika Westerberg return (0); 3010edf8e2afSMika Westerberg 3011edf8e2afSMika Westerberg /* 3012edf8e2afSMika Westerberg * Usually we don't dump executable pages as they contain 3013edf8e2afSMika Westerberg * non-writable code that debugger can read directly from 3014edf8e2afSMika Westerberg * target library etc. However, thread stacks are marked 3015edf8e2afSMika Westerberg * also executable so we read in first page of given region 3016edf8e2afSMika Westerberg * and check whether it contains elf header. If there is 3017edf8e2afSMika Westerberg * no elf header, we dump it. 3018edf8e2afSMika Westerberg */ 3019edf8e2afSMika Westerberg if (vma->vma_flags & PROT_EXEC) { 3020edf8e2afSMika Westerberg char page[TARGET_PAGE_SIZE]; 3021edf8e2afSMika Westerberg 3022edf8e2afSMika Westerberg copy_from_user(page, vma->vma_start, sizeof (page)); 3023edf8e2afSMika Westerberg if ((page[EI_MAG0] == ELFMAG0) && 3024edf8e2afSMika Westerberg (page[EI_MAG1] == ELFMAG1) && 3025edf8e2afSMika Westerberg (page[EI_MAG2] == ELFMAG2) && 3026edf8e2afSMika Westerberg (page[EI_MAG3] == ELFMAG3)) { 3027edf8e2afSMika Westerberg /* 3028edf8e2afSMika Westerberg * Mappings are possibly from ELF binary. Don't dump 3029edf8e2afSMika Westerberg * them. 3030edf8e2afSMika Westerberg */ 3031edf8e2afSMika Westerberg return (0); 3032edf8e2afSMika Westerberg } 3033edf8e2afSMika Westerberg } 3034edf8e2afSMika Westerberg 3035edf8e2afSMika Westerberg return (vma->vma_end - vma->vma_start); 3036edf8e2afSMika Westerberg } 3037edf8e2afSMika Westerberg 30381a1c4db9SMikhail Ilyin static int vma_walker(void *priv, target_ulong start, target_ulong end, 3039edf8e2afSMika Westerberg unsigned long flags) 3040edf8e2afSMika Westerberg { 3041edf8e2afSMika Westerberg struct mm_struct *mm = (struct mm_struct *)priv; 3042edf8e2afSMika Westerberg 3043edf8e2afSMika Westerberg vma_add_mapping(mm, start, end, flags); 3044edf8e2afSMika Westerberg return (0); 3045edf8e2afSMika Westerberg } 3046edf8e2afSMika Westerberg 3047edf8e2afSMika Westerberg static void fill_note(struct memelfnote *note, const char *name, int type, 3048edf8e2afSMika Westerberg unsigned int sz, void *data) 3049edf8e2afSMika Westerberg { 3050edf8e2afSMika Westerberg unsigned int namesz; 3051edf8e2afSMika Westerberg 3052edf8e2afSMika Westerberg namesz = strlen(name) + 1; 3053edf8e2afSMika Westerberg note->name = name; 3054edf8e2afSMika Westerberg note->namesz = namesz; 3055edf8e2afSMika Westerberg note->namesz_rounded = roundup(namesz, sizeof (int32_t)); 3056edf8e2afSMika Westerberg note->type = type; 305780f5ce75SLaurent Vivier note->datasz = sz; 305880f5ce75SLaurent Vivier note->datasz_rounded = roundup(sz, sizeof (int32_t)); 305980f5ce75SLaurent Vivier 3060edf8e2afSMika Westerberg note->data = data; 3061edf8e2afSMika Westerberg 3062edf8e2afSMika Westerberg /* 3063edf8e2afSMika Westerberg * We calculate rounded up note size here as specified by 3064edf8e2afSMika Westerberg * ELF document. 3065edf8e2afSMika Westerberg */ 3066edf8e2afSMika Westerberg note->notesz = sizeof (struct elf_note) + 306780f5ce75SLaurent Vivier note->namesz_rounded + note->datasz_rounded; 3068edf8e2afSMika Westerberg } 3069edf8e2afSMika Westerberg 3070edf8e2afSMika Westerberg static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine, 3071edf8e2afSMika Westerberg uint32_t flags) 3072edf8e2afSMika Westerberg { 3073edf8e2afSMika Westerberg (void) memset(elf, 0, sizeof(*elf)); 3074edf8e2afSMika Westerberg 3075edf8e2afSMika Westerberg (void) memcpy(elf->e_ident, ELFMAG, SELFMAG); 3076edf8e2afSMika Westerberg elf->e_ident[EI_CLASS] = ELF_CLASS; 3077edf8e2afSMika Westerberg elf->e_ident[EI_DATA] = ELF_DATA; 3078edf8e2afSMika Westerberg elf->e_ident[EI_VERSION] = EV_CURRENT; 3079edf8e2afSMika Westerberg elf->e_ident[EI_OSABI] = ELF_OSABI; 3080edf8e2afSMika Westerberg 3081edf8e2afSMika Westerberg elf->e_type = ET_CORE; 3082edf8e2afSMika Westerberg elf->e_machine = machine; 3083edf8e2afSMika Westerberg elf->e_version = EV_CURRENT; 3084edf8e2afSMika Westerberg elf->e_phoff = sizeof(struct elfhdr); 3085edf8e2afSMika Westerberg elf->e_flags = flags; 3086edf8e2afSMika Westerberg elf->e_ehsize = sizeof(struct elfhdr); 3087edf8e2afSMika Westerberg elf->e_phentsize = sizeof(struct elf_phdr); 3088edf8e2afSMika Westerberg elf->e_phnum = segs; 3089edf8e2afSMika Westerberg 3090edf8e2afSMika Westerberg bswap_ehdr(elf); 3091edf8e2afSMika Westerberg } 3092edf8e2afSMika Westerberg 3093edf8e2afSMika Westerberg static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset) 3094edf8e2afSMika Westerberg { 3095edf8e2afSMika Westerberg phdr->p_type = PT_NOTE; 3096edf8e2afSMika Westerberg phdr->p_offset = offset; 3097edf8e2afSMika Westerberg phdr->p_vaddr = 0; 3098edf8e2afSMika Westerberg phdr->p_paddr = 0; 3099edf8e2afSMika Westerberg phdr->p_filesz = sz; 3100edf8e2afSMika Westerberg phdr->p_memsz = 0; 3101edf8e2afSMika Westerberg phdr->p_flags = 0; 3102edf8e2afSMika Westerberg phdr->p_align = 0; 3103edf8e2afSMika Westerberg 3104991f8f0cSRichard Henderson bswap_phdr(phdr, 1); 3105edf8e2afSMika Westerberg } 3106edf8e2afSMika Westerberg 3107edf8e2afSMika Westerberg static size_t note_size(const struct memelfnote *note) 3108edf8e2afSMika Westerberg { 3109edf8e2afSMika Westerberg return (note->notesz); 3110edf8e2afSMika Westerberg } 3111edf8e2afSMika Westerberg 3112a2547a13SLaurent Desnogues static void fill_prstatus(struct target_elf_prstatus *prstatus, 3113edf8e2afSMika Westerberg const TaskState *ts, int signr) 3114edf8e2afSMika Westerberg { 3115edf8e2afSMika Westerberg (void) memset(prstatus, 0, sizeof (*prstatus)); 3116edf8e2afSMika Westerberg prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; 3117edf8e2afSMika Westerberg prstatus->pr_pid = ts->ts_tid; 3118edf8e2afSMika Westerberg prstatus->pr_ppid = getppid(); 3119edf8e2afSMika Westerberg prstatus->pr_pgrp = getpgrp(); 3120edf8e2afSMika Westerberg prstatus->pr_sid = getsid(0); 3121edf8e2afSMika Westerberg 3122edf8e2afSMika Westerberg bswap_prstatus(prstatus); 3123edf8e2afSMika Westerberg } 3124edf8e2afSMika Westerberg 3125a2547a13SLaurent Desnogues static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts) 3126edf8e2afSMika Westerberg { 3127900cfbcaSJim Meyering char *base_filename; 3128edf8e2afSMika Westerberg unsigned int i, len; 3129edf8e2afSMika Westerberg 3130edf8e2afSMika Westerberg (void) memset(psinfo, 0, sizeof (*psinfo)); 3131edf8e2afSMika Westerberg 3132edf8e2afSMika Westerberg len = ts->info->arg_end - ts->info->arg_start; 3133edf8e2afSMika Westerberg if (len >= ELF_PRARGSZ) 3134edf8e2afSMika Westerberg len = ELF_PRARGSZ - 1; 3135edf8e2afSMika Westerberg if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len)) 3136edf8e2afSMika Westerberg return -EFAULT; 3137edf8e2afSMika Westerberg for (i = 0; i < len; i++) 3138edf8e2afSMika Westerberg if (psinfo->pr_psargs[i] == 0) 3139edf8e2afSMika Westerberg psinfo->pr_psargs[i] = ' '; 3140edf8e2afSMika Westerberg psinfo->pr_psargs[len] = 0; 3141edf8e2afSMika Westerberg 3142edf8e2afSMika Westerberg psinfo->pr_pid = getpid(); 3143edf8e2afSMika Westerberg psinfo->pr_ppid = getppid(); 3144edf8e2afSMika Westerberg psinfo->pr_pgrp = getpgrp(); 3145edf8e2afSMika Westerberg psinfo->pr_sid = getsid(0); 3146edf8e2afSMika Westerberg psinfo->pr_uid = getuid(); 3147edf8e2afSMika Westerberg psinfo->pr_gid = getgid(); 3148edf8e2afSMika Westerberg 3149900cfbcaSJim Meyering base_filename = g_path_get_basename(ts->bprm->filename); 3150900cfbcaSJim Meyering /* 3151900cfbcaSJim Meyering * Using strncpy here is fine: at max-length, 3152900cfbcaSJim Meyering * this field is not NUL-terminated. 3153900cfbcaSJim Meyering */ 3154edf8e2afSMika Westerberg (void) strncpy(psinfo->pr_fname, base_filename, 3155edf8e2afSMika Westerberg sizeof(psinfo->pr_fname)); 3156edf8e2afSMika Westerberg 3157900cfbcaSJim Meyering g_free(base_filename); 3158edf8e2afSMika Westerberg bswap_psinfo(psinfo); 3159edf8e2afSMika Westerberg return (0); 3160edf8e2afSMika Westerberg } 3161edf8e2afSMika Westerberg 3162edf8e2afSMika Westerberg static void fill_auxv_note(struct memelfnote *note, const TaskState *ts) 3163edf8e2afSMika Westerberg { 3164edf8e2afSMika Westerberg elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv; 3165edf8e2afSMika Westerberg elf_addr_t orig_auxv = auxv; 3166edf8e2afSMika Westerberg void *ptr; 3167125b0f55SAlexander Graf int len = ts->info->auxv_len; 3168edf8e2afSMika Westerberg 3169edf8e2afSMika Westerberg /* 3170edf8e2afSMika Westerberg * Auxiliary vector is stored in target process stack. It contains 3171edf8e2afSMika Westerberg * {type, value} pairs that we need to dump into note. This is not 3172edf8e2afSMika Westerberg * strictly necessary but we do it here for sake of completeness. 3173edf8e2afSMika Westerberg */ 3174edf8e2afSMika Westerberg 3175edf8e2afSMika Westerberg /* read in whole auxv vector and copy it to memelfnote */ 3176edf8e2afSMika Westerberg ptr = lock_user(VERIFY_READ, orig_auxv, len, 0); 3177edf8e2afSMika Westerberg if (ptr != NULL) { 3178edf8e2afSMika Westerberg fill_note(note, "CORE", NT_AUXV, len, ptr); 3179edf8e2afSMika Westerberg unlock_user(ptr, auxv, len); 3180edf8e2afSMika Westerberg } 3181edf8e2afSMika Westerberg } 3182edf8e2afSMika Westerberg 3183edf8e2afSMika Westerberg /* 3184edf8e2afSMika Westerberg * Constructs name of coredump file. We have following convention 3185edf8e2afSMika Westerberg * for the name: 3186edf8e2afSMika Westerberg * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core 3187edf8e2afSMika Westerberg * 3188edf8e2afSMika Westerberg * Returns 0 in case of success, -1 otherwise (errno is set). 3189edf8e2afSMika Westerberg */ 3190edf8e2afSMika Westerberg static int core_dump_filename(const TaskState *ts, char *buf, 3191edf8e2afSMika Westerberg size_t bufsize) 3192edf8e2afSMika Westerberg { 3193edf8e2afSMika Westerberg char timestamp[64]; 3194edf8e2afSMika Westerberg char *base_filename = NULL; 3195edf8e2afSMika Westerberg struct timeval tv; 3196edf8e2afSMika Westerberg struct tm tm; 3197edf8e2afSMika Westerberg 3198edf8e2afSMika Westerberg assert(bufsize >= PATH_MAX); 3199edf8e2afSMika Westerberg 3200edf8e2afSMika Westerberg if (gettimeofday(&tv, NULL) < 0) { 3201edf8e2afSMika Westerberg (void) fprintf(stderr, "unable to get current timestamp: %s", 3202edf8e2afSMika Westerberg strerror(errno)); 3203edf8e2afSMika Westerberg return (-1); 3204edf8e2afSMika Westerberg } 3205edf8e2afSMika Westerberg 3206b8da57faSWei Jiangang base_filename = g_path_get_basename(ts->bprm->filename); 3207edf8e2afSMika Westerberg (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S", 3208edf8e2afSMika Westerberg localtime_r(&tv.tv_sec, &tm)); 3209edf8e2afSMika Westerberg (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core", 3210edf8e2afSMika Westerberg base_filename, timestamp, (int)getpid()); 3211b8da57faSWei Jiangang g_free(base_filename); 3212edf8e2afSMika Westerberg 3213edf8e2afSMika Westerberg return (0); 3214edf8e2afSMika Westerberg } 3215edf8e2afSMika Westerberg 3216edf8e2afSMika Westerberg static int dump_write(int fd, const void *ptr, size_t size) 3217edf8e2afSMika Westerberg { 3218edf8e2afSMika Westerberg const char *bufp = (const char *)ptr; 3219edf8e2afSMika Westerberg ssize_t bytes_written, bytes_left; 3220edf8e2afSMika Westerberg struct rlimit dumpsize; 3221edf8e2afSMika Westerberg off_t pos; 3222edf8e2afSMika Westerberg 3223edf8e2afSMika Westerberg bytes_written = 0; 3224edf8e2afSMika Westerberg getrlimit(RLIMIT_CORE, &dumpsize); 3225edf8e2afSMika Westerberg if ((pos = lseek(fd, 0, SEEK_CUR))==-1) { 3226edf8e2afSMika Westerberg if (errno == ESPIPE) { /* not a seekable stream */ 3227edf8e2afSMika Westerberg bytes_left = size; 3228edf8e2afSMika Westerberg } else { 3229edf8e2afSMika Westerberg return pos; 3230edf8e2afSMika Westerberg } 3231edf8e2afSMika Westerberg } else { 3232edf8e2afSMika Westerberg if (dumpsize.rlim_cur <= pos) { 3233edf8e2afSMika Westerberg return -1; 3234edf8e2afSMika Westerberg } else if (dumpsize.rlim_cur == RLIM_INFINITY) { 3235edf8e2afSMika Westerberg bytes_left = size; 3236edf8e2afSMika Westerberg } else { 3237edf8e2afSMika Westerberg size_t limit_left=dumpsize.rlim_cur - pos; 3238edf8e2afSMika Westerberg bytes_left = limit_left >= size ? size : limit_left ; 3239edf8e2afSMika Westerberg } 3240edf8e2afSMika Westerberg } 3241edf8e2afSMika Westerberg 3242edf8e2afSMika Westerberg /* 3243edf8e2afSMika Westerberg * In normal conditions, single write(2) should do but 3244edf8e2afSMika Westerberg * in case of socket etc. this mechanism is more portable. 3245edf8e2afSMika Westerberg */ 3246edf8e2afSMika Westerberg do { 3247edf8e2afSMika Westerberg bytes_written = write(fd, bufp, bytes_left); 3248edf8e2afSMika Westerberg if (bytes_written < 0) { 3249edf8e2afSMika Westerberg if (errno == EINTR) 3250edf8e2afSMika Westerberg continue; 3251edf8e2afSMika Westerberg return (-1); 3252edf8e2afSMika Westerberg } else if (bytes_written == 0) { /* eof */ 3253edf8e2afSMika Westerberg return (-1); 3254edf8e2afSMika Westerberg } 3255edf8e2afSMika Westerberg bufp += bytes_written; 3256edf8e2afSMika Westerberg bytes_left -= bytes_written; 3257edf8e2afSMika Westerberg } while (bytes_left > 0); 3258edf8e2afSMika Westerberg 3259edf8e2afSMika Westerberg return (0); 3260edf8e2afSMika Westerberg } 3261edf8e2afSMika Westerberg 3262edf8e2afSMika Westerberg static int write_note(struct memelfnote *men, int fd) 3263edf8e2afSMika Westerberg { 3264edf8e2afSMika Westerberg struct elf_note en; 3265edf8e2afSMika Westerberg 3266edf8e2afSMika Westerberg en.n_namesz = men->namesz; 3267edf8e2afSMika Westerberg en.n_type = men->type; 3268edf8e2afSMika Westerberg en.n_descsz = men->datasz; 3269edf8e2afSMika Westerberg 3270edf8e2afSMika Westerberg bswap_note(&en); 3271edf8e2afSMika Westerberg 3272edf8e2afSMika Westerberg if (dump_write(fd, &en, sizeof(en)) != 0) 3273edf8e2afSMika Westerberg return (-1); 3274edf8e2afSMika Westerberg if (dump_write(fd, men->name, men->namesz_rounded) != 0) 3275edf8e2afSMika Westerberg return (-1); 327680f5ce75SLaurent Vivier if (dump_write(fd, men->data, men->datasz_rounded) != 0) 3277edf8e2afSMika Westerberg return (-1); 3278edf8e2afSMika Westerberg 3279edf8e2afSMika Westerberg return (0); 3280edf8e2afSMika Westerberg } 3281edf8e2afSMika Westerberg 32829349b4f9SAndreas Färber static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env) 3283edf8e2afSMika Westerberg { 32840429a971SAndreas Färber CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 32850429a971SAndreas Färber TaskState *ts = (TaskState *)cpu->opaque; 3286edf8e2afSMika Westerberg struct elf_thread_status *ets; 3287edf8e2afSMika Westerberg 32887267c094SAnthony Liguori ets = g_malloc0(sizeof (*ets)); 3289edf8e2afSMika Westerberg ets->num_notes = 1; /* only prstatus is dumped */ 3290edf8e2afSMika Westerberg fill_prstatus(&ets->prstatus, ts, 0); 3291edf8e2afSMika Westerberg elf_core_copy_regs(&ets->prstatus.pr_reg, env); 3292edf8e2afSMika Westerberg fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus), 3293edf8e2afSMika Westerberg &ets->prstatus); 3294edf8e2afSMika Westerberg 329572cf2d4fSBlue Swirl QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link); 3296edf8e2afSMika Westerberg 3297edf8e2afSMika Westerberg info->notes_size += note_size(&ets->notes[0]); 3298edf8e2afSMika Westerberg } 3299edf8e2afSMika Westerberg 33006afafa86SPeter Maydell static void init_note_info(struct elf_note_info *info) 33016afafa86SPeter Maydell { 33026afafa86SPeter Maydell /* Initialize the elf_note_info structure so that it is at 33036afafa86SPeter Maydell * least safe to call free_note_info() on it. Must be 33046afafa86SPeter Maydell * called before calling fill_note_info(). 33056afafa86SPeter Maydell */ 33066afafa86SPeter Maydell memset(info, 0, sizeof (*info)); 33076afafa86SPeter Maydell QTAILQ_INIT(&info->thread_list); 33086afafa86SPeter Maydell } 33096afafa86SPeter Maydell 3310edf8e2afSMika Westerberg static int fill_note_info(struct elf_note_info *info, 33119349b4f9SAndreas Färber long signr, const CPUArchState *env) 3312edf8e2afSMika Westerberg { 3313edf8e2afSMika Westerberg #define NUMNOTES 3 33140429a971SAndreas Färber CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 33150429a971SAndreas Färber TaskState *ts = (TaskState *)cpu->opaque; 3316edf8e2afSMika Westerberg int i; 3317edf8e2afSMika Westerberg 3318c78d65e8SMarkus Armbruster info->notes = g_new0(struct memelfnote, NUMNOTES); 3319edf8e2afSMika Westerberg if (info->notes == NULL) 3320edf8e2afSMika Westerberg return (-ENOMEM); 33217267c094SAnthony Liguori info->prstatus = g_malloc0(sizeof (*info->prstatus)); 3322edf8e2afSMika Westerberg if (info->prstatus == NULL) 3323edf8e2afSMika Westerberg return (-ENOMEM); 33247267c094SAnthony Liguori info->psinfo = g_malloc0(sizeof (*info->psinfo)); 3325edf8e2afSMika Westerberg if (info->prstatus == NULL) 3326edf8e2afSMika Westerberg return (-ENOMEM); 3327edf8e2afSMika Westerberg 3328edf8e2afSMika Westerberg /* 3329edf8e2afSMika Westerberg * First fill in status (and registers) of current thread 3330edf8e2afSMika Westerberg * including process info & aux vector. 3331edf8e2afSMika Westerberg */ 3332edf8e2afSMika Westerberg fill_prstatus(info->prstatus, ts, signr); 3333edf8e2afSMika Westerberg elf_core_copy_regs(&info->prstatus->pr_reg, env); 3334edf8e2afSMika Westerberg fill_note(&info->notes[0], "CORE", NT_PRSTATUS, 3335edf8e2afSMika Westerberg sizeof (*info->prstatus), info->prstatus); 3336edf8e2afSMika Westerberg fill_psinfo(info->psinfo, ts); 3337edf8e2afSMika Westerberg fill_note(&info->notes[1], "CORE", NT_PRPSINFO, 3338edf8e2afSMika Westerberg sizeof (*info->psinfo), info->psinfo); 3339edf8e2afSMika Westerberg fill_auxv_note(&info->notes[2], ts); 3340edf8e2afSMika Westerberg info->numnote = 3; 3341edf8e2afSMika Westerberg 3342edf8e2afSMika Westerberg info->notes_size = 0; 3343edf8e2afSMika Westerberg for (i = 0; i < info->numnote; i++) 3344edf8e2afSMika Westerberg info->notes_size += note_size(&info->notes[i]); 3345edf8e2afSMika Westerberg 3346edf8e2afSMika Westerberg /* read and fill status of all threads */ 3347edf8e2afSMika Westerberg cpu_list_lock(); 3348bdc44640SAndreas Färber CPU_FOREACH(cpu) { 3349a2247f8eSAndreas Färber if (cpu == thread_cpu) { 3350edf8e2afSMika Westerberg continue; 3351182735efSAndreas Färber } 3352182735efSAndreas Färber fill_thread_info(info, (CPUArchState *)cpu->env_ptr); 3353edf8e2afSMika Westerberg } 3354edf8e2afSMika Westerberg cpu_list_unlock(); 3355edf8e2afSMika Westerberg 3356edf8e2afSMika Westerberg return (0); 3357edf8e2afSMika Westerberg } 3358edf8e2afSMika Westerberg 3359edf8e2afSMika Westerberg static void free_note_info(struct elf_note_info *info) 3360edf8e2afSMika Westerberg { 3361edf8e2afSMika Westerberg struct elf_thread_status *ets; 3362edf8e2afSMika Westerberg 336372cf2d4fSBlue Swirl while (!QTAILQ_EMPTY(&info->thread_list)) { 336472cf2d4fSBlue Swirl ets = QTAILQ_FIRST(&info->thread_list); 336572cf2d4fSBlue Swirl QTAILQ_REMOVE(&info->thread_list, ets, ets_link); 33667267c094SAnthony Liguori g_free(ets); 3367edf8e2afSMika Westerberg } 3368edf8e2afSMika Westerberg 33697267c094SAnthony Liguori g_free(info->prstatus); 33707267c094SAnthony Liguori g_free(info->psinfo); 33717267c094SAnthony Liguori g_free(info->notes); 3372edf8e2afSMika Westerberg } 3373edf8e2afSMika Westerberg 3374edf8e2afSMika Westerberg static int write_note_info(struct elf_note_info *info, int fd) 3375edf8e2afSMika Westerberg { 3376edf8e2afSMika Westerberg struct elf_thread_status *ets; 3377edf8e2afSMika Westerberg int i, error = 0; 3378edf8e2afSMika Westerberg 3379edf8e2afSMika Westerberg /* write prstatus, psinfo and auxv for current thread */ 3380edf8e2afSMika Westerberg for (i = 0; i < info->numnote; i++) 3381edf8e2afSMika Westerberg if ((error = write_note(&info->notes[i], fd)) != 0) 3382edf8e2afSMika Westerberg return (error); 3383edf8e2afSMika Westerberg 3384edf8e2afSMika Westerberg /* write prstatus for each thread */ 338552a53afeSEmilio G. Cota QTAILQ_FOREACH(ets, &info->thread_list, ets_link) { 3386edf8e2afSMika Westerberg if ((error = write_note(&ets->notes[0], fd)) != 0) 3387edf8e2afSMika Westerberg return (error); 3388edf8e2afSMika Westerberg } 3389edf8e2afSMika Westerberg 3390edf8e2afSMika Westerberg return (0); 3391edf8e2afSMika Westerberg } 3392edf8e2afSMika Westerberg 3393edf8e2afSMika Westerberg /* 3394edf8e2afSMika Westerberg * Write out ELF coredump. 3395edf8e2afSMika Westerberg * 3396edf8e2afSMika Westerberg * See documentation of ELF object file format in: 3397edf8e2afSMika Westerberg * http://www.caldera.com/developers/devspecs/gabi41.pdf 3398edf8e2afSMika Westerberg * 3399edf8e2afSMika Westerberg * Coredump format in linux is following: 3400edf8e2afSMika Westerberg * 3401edf8e2afSMika Westerberg * 0 +----------------------+ \ 3402edf8e2afSMika Westerberg * | ELF header | ET_CORE | 3403edf8e2afSMika Westerberg * +----------------------+ | 3404edf8e2afSMika Westerberg * | ELF program headers | |--- headers 3405edf8e2afSMika Westerberg * | - NOTE section | | 3406edf8e2afSMika Westerberg * | - PT_LOAD sections | | 3407edf8e2afSMika Westerberg * +----------------------+ / 3408edf8e2afSMika Westerberg * | NOTEs: | 3409edf8e2afSMika Westerberg * | - NT_PRSTATUS | 3410edf8e2afSMika Westerberg * | - NT_PRSINFO | 3411edf8e2afSMika Westerberg * | - NT_AUXV | 3412edf8e2afSMika Westerberg * +----------------------+ <-- aligned to target page 3413edf8e2afSMika Westerberg * | Process memory dump | 3414edf8e2afSMika Westerberg * : : 3415edf8e2afSMika Westerberg * . . 3416edf8e2afSMika Westerberg * : : 3417edf8e2afSMika Westerberg * | | 3418edf8e2afSMika Westerberg * +----------------------+ 3419edf8e2afSMika Westerberg * 3420edf8e2afSMika Westerberg * NT_PRSTATUS -> struct elf_prstatus (per thread) 3421edf8e2afSMika Westerberg * NT_PRSINFO -> struct elf_prpsinfo 3422edf8e2afSMika Westerberg * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()). 3423edf8e2afSMika Westerberg * 3424edf8e2afSMika Westerberg * Format follows System V format as close as possible. Current 3425edf8e2afSMika Westerberg * version limitations are as follows: 3426edf8e2afSMika Westerberg * - no floating point registers are dumped 3427edf8e2afSMika Westerberg * 3428edf8e2afSMika Westerberg * Function returns 0 in case of success, negative errno otherwise. 3429edf8e2afSMika Westerberg * 3430edf8e2afSMika Westerberg * TODO: make this work also during runtime: it should be 3431edf8e2afSMika Westerberg * possible to force coredump from running process and then 3432edf8e2afSMika Westerberg * continue processing. For example qemu could set up SIGUSR2 3433edf8e2afSMika Westerberg * handler (provided that target process haven't registered 3434edf8e2afSMika Westerberg * handler for that) that does the dump when signal is received. 3435edf8e2afSMika Westerberg */ 34369349b4f9SAndreas Färber static int elf_core_dump(int signr, const CPUArchState *env) 3437edf8e2afSMika Westerberg { 34380429a971SAndreas Färber const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 34390429a971SAndreas Färber const TaskState *ts = (const TaskState *)cpu->opaque; 3440edf8e2afSMika Westerberg struct vm_area_struct *vma = NULL; 3441edf8e2afSMika Westerberg char corefile[PATH_MAX]; 3442edf8e2afSMika Westerberg struct elf_note_info info; 3443edf8e2afSMika Westerberg struct elfhdr elf; 3444edf8e2afSMika Westerberg struct elf_phdr phdr; 3445edf8e2afSMika Westerberg struct rlimit dumpsize; 3446edf8e2afSMika Westerberg struct mm_struct *mm = NULL; 3447edf8e2afSMika Westerberg off_t offset = 0, data_offset = 0; 3448edf8e2afSMika Westerberg int segs = 0; 3449edf8e2afSMika Westerberg int fd = -1; 3450edf8e2afSMika Westerberg 34516afafa86SPeter Maydell init_note_info(&info); 34526afafa86SPeter Maydell 3453edf8e2afSMika Westerberg errno = 0; 3454edf8e2afSMika Westerberg getrlimit(RLIMIT_CORE, &dumpsize); 3455edf8e2afSMika Westerberg if (dumpsize.rlim_cur == 0) 3456edf8e2afSMika Westerberg return 0; 3457edf8e2afSMika Westerberg 3458edf8e2afSMika Westerberg if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0) 3459edf8e2afSMika Westerberg return (-errno); 3460edf8e2afSMika Westerberg 3461edf8e2afSMika Westerberg if ((fd = open(corefile, O_WRONLY | O_CREAT, 3462edf8e2afSMika Westerberg S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0) 3463edf8e2afSMika Westerberg return (-errno); 3464edf8e2afSMika Westerberg 3465edf8e2afSMika Westerberg /* 3466edf8e2afSMika Westerberg * Walk through target process memory mappings and 3467edf8e2afSMika Westerberg * set up structure containing this information. After 3468edf8e2afSMika Westerberg * this point vma_xxx functions can be used. 3469edf8e2afSMika Westerberg */ 3470edf8e2afSMika Westerberg if ((mm = vma_init()) == NULL) 3471edf8e2afSMika Westerberg goto out; 3472edf8e2afSMika Westerberg 3473edf8e2afSMika Westerberg walk_memory_regions(mm, vma_walker); 3474edf8e2afSMika Westerberg segs = vma_get_mapping_count(mm); 3475edf8e2afSMika Westerberg 3476edf8e2afSMika Westerberg /* 3477edf8e2afSMika Westerberg * Construct valid coredump ELF header. We also 3478edf8e2afSMika Westerberg * add one more segment for notes. 3479edf8e2afSMika Westerberg */ 3480edf8e2afSMika Westerberg fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0); 3481edf8e2afSMika Westerberg if (dump_write(fd, &elf, sizeof (elf)) != 0) 3482edf8e2afSMika Westerberg goto out; 3483edf8e2afSMika Westerberg 3484b6af0975SDaniel P. Berrange /* fill in the in-memory version of notes */ 3485edf8e2afSMika Westerberg if (fill_note_info(&info, signr, env) < 0) 3486edf8e2afSMika Westerberg goto out; 3487edf8e2afSMika Westerberg 3488edf8e2afSMika Westerberg offset += sizeof (elf); /* elf header */ 3489edf8e2afSMika Westerberg offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */ 3490edf8e2afSMika Westerberg 3491edf8e2afSMika Westerberg /* write out notes program header */ 3492edf8e2afSMika Westerberg fill_elf_note_phdr(&phdr, info.notes_size, offset); 3493edf8e2afSMika Westerberg 3494edf8e2afSMika Westerberg offset += info.notes_size; 3495edf8e2afSMika Westerberg if (dump_write(fd, &phdr, sizeof (phdr)) != 0) 3496edf8e2afSMika Westerberg goto out; 3497edf8e2afSMika Westerberg 3498edf8e2afSMika Westerberg /* 3499edf8e2afSMika Westerberg * ELF specification wants data to start at page boundary so 3500edf8e2afSMika Westerberg * we align it here. 3501edf8e2afSMika Westerberg */ 350280f5ce75SLaurent Vivier data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE); 3503edf8e2afSMika Westerberg 3504edf8e2afSMika Westerberg /* 3505edf8e2afSMika Westerberg * Write program headers for memory regions mapped in 3506edf8e2afSMika Westerberg * the target process. 3507edf8e2afSMika Westerberg */ 3508edf8e2afSMika Westerberg for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) { 3509edf8e2afSMika Westerberg (void) memset(&phdr, 0, sizeof (phdr)); 3510edf8e2afSMika Westerberg 3511edf8e2afSMika Westerberg phdr.p_type = PT_LOAD; 3512edf8e2afSMika Westerberg phdr.p_offset = offset; 3513edf8e2afSMika Westerberg phdr.p_vaddr = vma->vma_start; 3514edf8e2afSMika Westerberg phdr.p_paddr = 0; 3515edf8e2afSMika Westerberg phdr.p_filesz = vma_dump_size(vma); 3516edf8e2afSMika Westerberg offset += phdr.p_filesz; 3517edf8e2afSMika Westerberg phdr.p_memsz = vma->vma_end - vma->vma_start; 3518edf8e2afSMika Westerberg phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0; 3519edf8e2afSMika Westerberg if (vma->vma_flags & PROT_WRITE) 3520edf8e2afSMika Westerberg phdr.p_flags |= PF_W; 3521edf8e2afSMika Westerberg if (vma->vma_flags & PROT_EXEC) 3522edf8e2afSMika Westerberg phdr.p_flags |= PF_X; 3523edf8e2afSMika Westerberg phdr.p_align = ELF_EXEC_PAGESIZE; 3524edf8e2afSMika Westerberg 352580f5ce75SLaurent Vivier bswap_phdr(&phdr, 1); 3526772034b6SPeter Maydell if (dump_write(fd, &phdr, sizeof(phdr)) != 0) { 3527772034b6SPeter Maydell goto out; 3528772034b6SPeter Maydell } 3529edf8e2afSMika Westerberg } 3530edf8e2afSMika Westerberg 3531edf8e2afSMika Westerberg /* 3532edf8e2afSMika Westerberg * Next we write notes just after program headers. No 3533edf8e2afSMika Westerberg * alignment needed here. 3534edf8e2afSMika Westerberg */ 3535edf8e2afSMika Westerberg if (write_note_info(&info, fd) < 0) 3536edf8e2afSMika Westerberg goto out; 3537edf8e2afSMika Westerberg 3538edf8e2afSMika Westerberg /* align data to page boundary */ 3539edf8e2afSMika Westerberg if (lseek(fd, data_offset, SEEK_SET) != data_offset) 3540edf8e2afSMika Westerberg goto out; 3541edf8e2afSMika Westerberg 3542edf8e2afSMika Westerberg /* 3543edf8e2afSMika Westerberg * Finally we can dump process memory into corefile as well. 3544edf8e2afSMika Westerberg */ 3545edf8e2afSMika Westerberg for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) { 3546edf8e2afSMika Westerberg abi_ulong addr; 3547edf8e2afSMika Westerberg abi_ulong end; 3548edf8e2afSMika Westerberg 3549edf8e2afSMika Westerberg end = vma->vma_start + vma_dump_size(vma); 3550edf8e2afSMika Westerberg 3551edf8e2afSMika Westerberg for (addr = vma->vma_start; addr < end; 3552edf8e2afSMika Westerberg addr += TARGET_PAGE_SIZE) { 3553edf8e2afSMika Westerberg char page[TARGET_PAGE_SIZE]; 3554edf8e2afSMika Westerberg int error; 3555edf8e2afSMika Westerberg 3556edf8e2afSMika Westerberg /* 3557edf8e2afSMika Westerberg * Read in page from target process memory and 3558edf8e2afSMika Westerberg * write it to coredump file. 3559edf8e2afSMika Westerberg */ 3560edf8e2afSMika Westerberg error = copy_from_user(page, addr, sizeof (page)); 3561edf8e2afSMika Westerberg if (error != 0) { 356249995e17SAurelien Jarno (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n", 3563edf8e2afSMika Westerberg addr); 3564edf8e2afSMika Westerberg errno = -error; 3565edf8e2afSMika Westerberg goto out; 3566edf8e2afSMika Westerberg } 3567edf8e2afSMika Westerberg if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0) 3568edf8e2afSMika Westerberg goto out; 3569edf8e2afSMika Westerberg } 3570edf8e2afSMika Westerberg } 3571edf8e2afSMika Westerberg 3572edf8e2afSMika Westerberg out: 3573edf8e2afSMika Westerberg free_note_info(&info); 3574edf8e2afSMika Westerberg if (mm != NULL) 3575edf8e2afSMika Westerberg vma_delete(mm); 3576edf8e2afSMika Westerberg (void) close(fd); 3577edf8e2afSMika Westerberg 3578edf8e2afSMika Westerberg if (errno != 0) 3579edf8e2afSMika Westerberg return (-errno); 3580edf8e2afSMika Westerberg return (0); 3581edf8e2afSMika Westerberg } 3582edf8e2afSMika Westerberg #endif /* USE_ELF_CORE_DUMP */ 3583edf8e2afSMika Westerberg 3584e5fe0c52Spbrook void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) 3585e5fe0c52Spbrook { 3586e5fe0c52Spbrook init_thread(regs, infop); 3587e5fe0c52Spbrook } 3588