1 /* This is the Linux kernel elf-loading code, ported into user space */ 2 3 #include <stdio.h> 4 #include <sys/types.h> 5 #include <fcntl.h> 6 #include <errno.h> 7 #include <unistd.h> 8 #include <sys/mman.h> 9 #include <stdlib.h> 10 #include <string.h> 11 12 #include "qemu.h" 13 #include "disas.h" 14 15 #ifdef _ARCH_PPC64 16 #undef ARCH_DLINFO 17 #undef ELF_PLATFORM 18 #undef ELF_HWCAP 19 #undef ELF_CLASS 20 #undef ELF_DATA 21 #undef ELF_ARCH 22 #endif 23 24 /* from personality.h */ 25 26 /* 27 * Flags for bug emulation. 28 * 29 * These occupy the top three bytes. 30 */ 31 enum { 32 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ 33 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors 34 * (signal handling) 35 */ 36 MMAP_PAGE_ZERO = 0x0100000, 37 ADDR_COMPAT_LAYOUT = 0x0200000, 38 READ_IMPLIES_EXEC = 0x0400000, 39 ADDR_LIMIT_32BIT = 0x0800000, 40 SHORT_INODE = 0x1000000, 41 WHOLE_SECONDS = 0x2000000, 42 STICKY_TIMEOUTS = 0x4000000, 43 ADDR_LIMIT_3GB = 0x8000000, 44 }; 45 46 /* 47 * Personality types. 48 * 49 * These go in the low byte. Avoid using the top bit, it will 50 * conflict with error returns. 51 */ 52 enum { 53 PER_LINUX = 0x0000, 54 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, 55 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, 56 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, 57 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, 58 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | 59 WHOLE_SECONDS | SHORT_INODE, 60 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, 61 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, 62 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, 63 PER_BSD = 0x0006, 64 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, 65 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, 66 PER_LINUX32 = 0x0008, 67 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, 68 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ 69 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ 70 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ 71 PER_RISCOS = 0x000c, 72 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, 73 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, 74 PER_OSF4 = 0x000f, /* OSF/1 v4 */ 75 PER_HPUX = 0x0010, 76 PER_MASK = 0x00ff, 77 }; 78 79 /* 80 * Return the base personality without flags. 81 */ 82 #define personality(pers) (pers & PER_MASK) 83 84 /* this flag is uneffective under linux too, should be deleted */ 85 #ifndef MAP_DENYWRITE 86 #define MAP_DENYWRITE 0 87 #endif 88 89 /* should probably go in elf.h */ 90 #ifndef ELIBBAD 91 #define ELIBBAD 80 92 #endif 93 94 #ifdef TARGET_I386 95 96 #define ELF_PLATFORM get_elf_platform() 97 98 static const char *get_elf_platform(void) 99 { 100 static char elf_platform[] = "i386"; 101 int family = (thread_env->cpuid_version >> 8) & 0xff; 102 if (family > 6) 103 family = 6; 104 if (family >= 3) 105 elf_platform[1] = '0' + family; 106 return elf_platform; 107 } 108 109 #define ELF_HWCAP get_elf_hwcap() 110 111 static uint32_t get_elf_hwcap(void) 112 { 113 return thread_env->cpuid_features; 114 } 115 116 #ifdef TARGET_X86_64 117 #define ELF_START_MMAP 0x2aaaaab000ULL 118 #define elf_check_arch(x) ( ((x) == ELF_ARCH) ) 119 120 #define ELF_CLASS ELFCLASS64 121 #define ELF_DATA ELFDATA2LSB 122 #define ELF_ARCH EM_X86_64 123 124 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 125 { 126 regs->rax = 0; 127 regs->rsp = infop->start_stack; 128 regs->rip = infop->entry; 129 } 130 131 #else 132 133 #define ELF_START_MMAP 0x80000000 134 135 /* 136 * This is used to ensure we don't load something for the wrong architecture. 137 */ 138 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) ) 139 140 /* 141 * These are used to set parameters in the core dumps. 142 */ 143 #define ELF_CLASS ELFCLASS32 144 #define ELF_DATA ELFDATA2LSB 145 #define ELF_ARCH EM_386 146 147 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 148 { 149 regs->esp = infop->start_stack; 150 regs->eip = infop->entry; 151 152 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program 153 starts %edx contains a pointer to a function which might be 154 registered using `atexit'. This provides a mean for the 155 dynamic linker to call DT_FINI functions for shared libraries 156 that have been loaded before the code runs. 157 158 A value of 0 tells we have no such handler. */ 159 regs->edx = 0; 160 } 161 #endif 162 163 #define USE_ELF_CORE_DUMP 164 #define ELF_EXEC_PAGESIZE 4096 165 166 #endif 167 168 #ifdef TARGET_ARM 169 170 #define ELF_START_MMAP 0x80000000 171 172 #define elf_check_arch(x) ( (x) == EM_ARM ) 173 174 #define ELF_CLASS ELFCLASS32 175 #ifdef TARGET_WORDS_BIGENDIAN 176 #define ELF_DATA ELFDATA2MSB 177 #else 178 #define ELF_DATA ELFDATA2LSB 179 #endif 180 #define ELF_ARCH EM_ARM 181 182 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 183 { 184 abi_long stack = infop->start_stack; 185 memset(regs, 0, sizeof(*regs)); 186 regs->ARM_cpsr = 0x10; 187 if (infop->entry & 1) 188 regs->ARM_cpsr |= CPSR_T; 189 regs->ARM_pc = infop->entry & 0xfffffffe; 190 regs->ARM_sp = infop->start_stack; 191 /* FIXME - what to for failure of get_user()? */ 192 get_user_ual(regs->ARM_r2, stack + 8); /* envp */ 193 get_user_ual(regs->ARM_r1, stack + 4); /* envp */ 194 /* XXX: it seems that r0 is zeroed after ! */ 195 regs->ARM_r0 = 0; 196 /* For uClinux PIC binaries. */ 197 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ 198 regs->ARM_r10 = infop->start_data; 199 } 200 201 #define USE_ELF_CORE_DUMP 202 #define ELF_EXEC_PAGESIZE 4096 203 204 enum 205 { 206 ARM_HWCAP_ARM_SWP = 1 << 0, 207 ARM_HWCAP_ARM_HALF = 1 << 1, 208 ARM_HWCAP_ARM_THUMB = 1 << 2, 209 ARM_HWCAP_ARM_26BIT = 1 << 3, 210 ARM_HWCAP_ARM_FAST_MULT = 1 << 4, 211 ARM_HWCAP_ARM_FPA = 1 << 5, 212 ARM_HWCAP_ARM_VFP = 1 << 6, 213 ARM_HWCAP_ARM_EDSP = 1 << 7, 214 }; 215 216 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \ 217 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \ 218 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP) 219 220 #endif 221 222 #ifdef TARGET_SPARC 223 #ifdef TARGET_SPARC64 224 225 #define ELF_START_MMAP 0x80000000 226 227 #ifndef TARGET_ABI32 228 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS ) 229 #else 230 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC ) 231 #endif 232 233 #define ELF_CLASS ELFCLASS64 234 #define ELF_DATA ELFDATA2MSB 235 #define ELF_ARCH EM_SPARCV9 236 237 #define STACK_BIAS 2047 238 239 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 240 { 241 #ifndef TARGET_ABI32 242 regs->tstate = 0; 243 #endif 244 regs->pc = infop->entry; 245 regs->npc = regs->pc + 4; 246 regs->y = 0; 247 #ifdef TARGET_ABI32 248 regs->u_regs[14] = infop->start_stack - 16 * 4; 249 #else 250 if (personality(infop->personality) == PER_LINUX32) 251 regs->u_regs[14] = infop->start_stack - 16 * 4; 252 else 253 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS; 254 #endif 255 } 256 257 #else 258 #define ELF_START_MMAP 0x80000000 259 260 #define elf_check_arch(x) ( (x) == EM_SPARC ) 261 262 #define ELF_CLASS ELFCLASS32 263 #define ELF_DATA ELFDATA2MSB 264 #define ELF_ARCH EM_SPARC 265 266 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 267 { 268 regs->psr = 0; 269 regs->pc = infop->entry; 270 regs->npc = regs->pc + 4; 271 regs->y = 0; 272 regs->u_regs[14] = infop->start_stack - 16 * 4; 273 } 274 275 #endif 276 #endif 277 278 #ifdef TARGET_PPC 279 280 #define ELF_START_MMAP 0x80000000 281 282 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) 283 284 #define elf_check_arch(x) ( (x) == EM_PPC64 ) 285 286 #define ELF_CLASS ELFCLASS64 287 288 #else 289 290 #define elf_check_arch(x) ( (x) == EM_PPC ) 291 292 #define ELF_CLASS ELFCLASS32 293 294 #endif 295 296 #ifdef TARGET_WORDS_BIGENDIAN 297 #define ELF_DATA ELFDATA2MSB 298 #else 299 #define ELF_DATA ELFDATA2LSB 300 #endif 301 #define ELF_ARCH EM_PPC 302 303 /* 304 * We need to put in some extra aux table entries to tell glibc what 305 * the cache block size is, so it can use the dcbz instruction safely. 306 */ 307 #define AT_DCACHEBSIZE 19 308 #define AT_ICACHEBSIZE 20 309 #define AT_UCACHEBSIZE 21 310 /* A special ignored type value for PPC, for glibc compatibility. */ 311 #define AT_IGNOREPPC 22 312 /* 313 * The requirements here are: 314 * - keep the final alignment of sp (sp & 0xf) 315 * - make sure the 32-bit value at the first 16 byte aligned position of 316 * AUXV is greater than 16 for glibc compatibility. 317 * AT_IGNOREPPC is used for that. 318 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, 319 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. 320 */ 321 #define DLINFO_ARCH_ITEMS 5 322 #define ARCH_DLINFO \ 323 do { \ 324 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \ 325 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \ 326 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ 327 /* \ 328 * Now handle glibc compatibility. \ 329 */ \ 330 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 331 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 332 } while (0) 333 334 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) 335 { 336 abi_ulong pos = infop->start_stack; 337 abi_ulong tmp; 338 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) 339 abi_ulong entry, toc; 340 #endif 341 342 _regs->gpr[1] = infop->start_stack; 343 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) 344 entry = ldq_raw(infop->entry) + infop->load_addr; 345 toc = ldq_raw(infop->entry + 8) + infop->load_addr; 346 _regs->gpr[2] = toc; 347 infop->entry = entry; 348 #endif 349 _regs->nip = infop->entry; 350 /* Note that isn't exactly what regular kernel does 351 * but this is what the ABI wants and is needed to allow 352 * execution of PPC BSD programs. 353 */ 354 /* FIXME - what to for failure of get_user()? */ 355 get_user_ual(_regs->gpr[3], pos); 356 pos += sizeof(abi_ulong); 357 _regs->gpr[4] = pos; 358 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong)) 359 tmp = ldl(pos); 360 _regs->gpr[5] = pos; 361 } 362 363 #define USE_ELF_CORE_DUMP 364 #define ELF_EXEC_PAGESIZE 4096 365 366 #endif 367 368 #ifdef TARGET_MIPS 369 370 #define ELF_START_MMAP 0x80000000 371 372 #define elf_check_arch(x) ( (x) == EM_MIPS ) 373 374 #ifdef TARGET_MIPS64 375 #define ELF_CLASS ELFCLASS64 376 #else 377 #define ELF_CLASS ELFCLASS32 378 #endif 379 #ifdef TARGET_WORDS_BIGENDIAN 380 #define ELF_DATA ELFDATA2MSB 381 #else 382 #define ELF_DATA ELFDATA2LSB 383 #endif 384 #define ELF_ARCH EM_MIPS 385 386 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 387 { 388 regs->cp0_status = 2 << CP0St_KSU; 389 regs->cp0_epc = infop->entry; 390 regs->regs[29] = infop->start_stack; 391 } 392 393 #define USE_ELF_CORE_DUMP 394 #define ELF_EXEC_PAGESIZE 4096 395 396 #endif /* TARGET_MIPS */ 397 398 #ifdef TARGET_SH4 399 400 #define ELF_START_MMAP 0x80000000 401 402 #define elf_check_arch(x) ( (x) == EM_SH ) 403 404 #define ELF_CLASS ELFCLASS32 405 #define ELF_DATA ELFDATA2LSB 406 #define ELF_ARCH EM_SH 407 408 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 409 { 410 /* Check other registers XXXXX */ 411 regs->pc = infop->entry; 412 regs->regs[15] = infop->start_stack; 413 } 414 415 #define USE_ELF_CORE_DUMP 416 #define ELF_EXEC_PAGESIZE 4096 417 418 #endif 419 420 #ifdef TARGET_CRIS 421 422 #define ELF_START_MMAP 0x80000000 423 424 #define elf_check_arch(x) ( (x) == EM_CRIS ) 425 426 #define ELF_CLASS ELFCLASS32 427 #define ELF_DATA ELFDATA2LSB 428 #define ELF_ARCH EM_CRIS 429 430 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 431 { 432 regs->erp = infop->entry; 433 } 434 435 #define USE_ELF_CORE_DUMP 436 #define ELF_EXEC_PAGESIZE 8192 437 438 #endif 439 440 #ifdef TARGET_M68K 441 442 #define ELF_START_MMAP 0x80000000 443 444 #define elf_check_arch(x) ( (x) == EM_68K ) 445 446 #define ELF_CLASS ELFCLASS32 447 #define ELF_DATA ELFDATA2MSB 448 #define ELF_ARCH EM_68K 449 450 /* ??? Does this need to do anything? 451 #define ELF_PLAT_INIT(_r) */ 452 453 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 454 { 455 regs->usp = infop->start_stack; 456 regs->sr = 0; 457 regs->pc = infop->entry; 458 } 459 460 #define USE_ELF_CORE_DUMP 461 #define ELF_EXEC_PAGESIZE 8192 462 463 #endif 464 465 #ifdef TARGET_ALPHA 466 467 #define ELF_START_MMAP (0x30000000000ULL) 468 469 #define elf_check_arch(x) ( (x) == ELF_ARCH ) 470 471 #define ELF_CLASS ELFCLASS64 472 #define ELF_DATA ELFDATA2MSB 473 #define ELF_ARCH EM_ALPHA 474 475 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 476 { 477 regs->pc = infop->entry; 478 regs->ps = 8; 479 regs->usp = infop->start_stack; 480 regs->unique = infop->start_data; /* ? */ 481 printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", 482 regs->unique, infop->start_data); 483 } 484 485 #define USE_ELF_CORE_DUMP 486 #define ELF_EXEC_PAGESIZE 8192 487 488 #endif /* TARGET_ALPHA */ 489 490 #ifndef ELF_PLATFORM 491 #define ELF_PLATFORM (NULL) 492 #endif 493 494 #ifndef ELF_HWCAP 495 #define ELF_HWCAP 0 496 #endif 497 498 #ifdef TARGET_ABI32 499 #undef ELF_CLASS 500 #define ELF_CLASS ELFCLASS32 501 #undef bswaptls 502 #define bswaptls(ptr) bswap32s(ptr) 503 #endif 504 505 #include "elf.h" 506 507 struct exec 508 { 509 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 510 unsigned int a_text; /* length of text, in bytes */ 511 unsigned int a_data; /* length of data, in bytes */ 512 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 513 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 514 unsigned int a_entry; /* start address */ 515 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 516 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 517 }; 518 519 520 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 521 #define OMAGIC 0407 522 #define NMAGIC 0410 523 #define ZMAGIC 0413 524 #define QMAGIC 0314 525 526 /* max code+data+bss space allocated to elf interpreter */ 527 #define INTERP_MAP_SIZE (32 * 1024 * 1024) 528 529 /* max code+data+bss+brk space allocated to ET_DYN executables */ 530 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024) 531 532 /* Necessary parameters */ 533 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE 534 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1)) 535 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1)) 536 537 #define INTERPRETER_NONE 0 538 #define INTERPRETER_AOUT 1 539 #define INTERPRETER_ELF 2 540 541 #define DLINFO_ITEMS 12 542 543 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 544 { 545 memcpy(to, from, n); 546 } 547 548 static int load_aout_interp(void * exptr, int interp_fd); 549 550 #ifdef BSWAP_NEEDED 551 static void bswap_ehdr(struct elfhdr *ehdr) 552 { 553 bswap16s(&ehdr->e_type); /* Object file type */ 554 bswap16s(&ehdr->e_machine); /* Architecture */ 555 bswap32s(&ehdr->e_version); /* Object file version */ 556 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 557 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 558 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 559 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 560 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 561 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 562 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 563 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 564 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 565 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 566 } 567 568 static void bswap_phdr(struct elf_phdr *phdr) 569 { 570 bswap32s(&phdr->p_type); /* Segment type */ 571 bswaptls(&phdr->p_offset); /* Segment file offset */ 572 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 573 bswaptls(&phdr->p_paddr); /* Segment physical address */ 574 bswaptls(&phdr->p_filesz); /* Segment size in file */ 575 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 576 bswap32s(&phdr->p_flags); /* Segment flags */ 577 bswaptls(&phdr->p_align); /* Segment alignment */ 578 } 579 580 static void bswap_shdr(struct elf_shdr *shdr) 581 { 582 bswap32s(&shdr->sh_name); 583 bswap32s(&shdr->sh_type); 584 bswaptls(&shdr->sh_flags); 585 bswaptls(&shdr->sh_addr); 586 bswaptls(&shdr->sh_offset); 587 bswaptls(&shdr->sh_size); 588 bswap32s(&shdr->sh_link); 589 bswap32s(&shdr->sh_info); 590 bswaptls(&shdr->sh_addralign); 591 bswaptls(&shdr->sh_entsize); 592 } 593 594 static void bswap_sym(struct elf_sym *sym) 595 { 596 bswap32s(&sym->st_name); 597 bswaptls(&sym->st_value); 598 bswaptls(&sym->st_size); 599 bswap16s(&sym->st_shndx); 600 } 601 #endif 602 603 /* 604 * 'copy_elf_strings()' copies argument/envelope strings from user 605 * memory to free pages in kernel mem. These are in a format ready 606 * to be put directly into the top of new user memory. 607 * 608 */ 609 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page, 610 abi_ulong p) 611 { 612 char *tmp, *tmp1, *pag = NULL; 613 int len, offset = 0; 614 615 if (!p) { 616 return 0; /* bullet-proofing */ 617 } 618 while (argc-- > 0) { 619 tmp = argv[argc]; 620 if (!tmp) { 621 fprintf(stderr, "VFS: argc is wrong"); 622 exit(-1); 623 } 624 tmp1 = tmp; 625 while (*tmp++); 626 len = tmp - tmp1; 627 if (p < len) { /* this shouldn't happen - 128kB */ 628 return 0; 629 } 630 while (len) { 631 --p; --tmp; --len; 632 if (--offset < 0) { 633 offset = p % TARGET_PAGE_SIZE; 634 pag = (char *)page[p/TARGET_PAGE_SIZE]; 635 if (!pag) { 636 pag = (char *)malloc(TARGET_PAGE_SIZE); 637 memset(pag, 0, TARGET_PAGE_SIZE); 638 page[p/TARGET_PAGE_SIZE] = pag; 639 if (!pag) 640 return 0; 641 } 642 } 643 if (len == 0 || offset == 0) { 644 *(pag + offset) = *tmp; 645 } 646 else { 647 int bytes_to_copy = (len > offset) ? offset : len; 648 tmp -= bytes_to_copy; 649 p -= bytes_to_copy; 650 offset -= bytes_to_copy; 651 len -= bytes_to_copy; 652 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1); 653 } 654 } 655 } 656 return p; 657 } 658 659 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm, 660 struct image_info *info) 661 { 662 abi_ulong stack_base, size, error; 663 int i; 664 665 /* Create enough stack to hold everything. If we don't use 666 * it for args, we'll use it for something else... 667 */ 668 size = x86_stack_size; 669 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) 670 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE; 671 error = target_mmap(0, 672 size + qemu_host_page_size, 673 PROT_READ | PROT_WRITE, 674 MAP_PRIVATE | MAP_ANON, 675 -1, 0); 676 if (error == -1) { 677 perror("stk mmap"); 678 exit(-1); 679 } 680 /* we reserve one extra page at the top of the stack as guard */ 681 target_mprotect(error + size, qemu_host_page_size, PROT_NONE); 682 683 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE; 684 p += stack_base; 685 686 for (i = 0 ; i < MAX_ARG_PAGES ; i++) { 687 if (bprm->page[i]) { 688 info->rss++; 689 /* FIXME - check return value of memcpy_to_target() for failure */ 690 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE); 691 free(bprm->page[i]); 692 } 693 stack_base += TARGET_PAGE_SIZE; 694 } 695 return p; 696 } 697 698 static void set_brk(abi_ulong start, abi_ulong end) 699 { 700 /* page-align the start and end addresses... */ 701 start = HOST_PAGE_ALIGN(start); 702 end = HOST_PAGE_ALIGN(end); 703 if (end <= start) 704 return; 705 if(target_mmap(start, end - start, 706 PROT_READ | PROT_WRITE | PROT_EXEC, 707 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { 708 perror("cannot mmap brk"); 709 exit(-1); 710 } 711 } 712 713 714 /* We need to explicitly zero any fractional pages after the data 715 section (i.e. bss). This would contain the junk from the file that 716 should not be in memory. */ 717 static void padzero(abi_ulong elf_bss, abi_ulong last_bss) 718 { 719 abi_ulong nbyte; 720 721 if (elf_bss >= last_bss) 722 return; 723 724 /* XXX: this is really a hack : if the real host page size is 725 smaller than the target page size, some pages after the end 726 of the file may not be mapped. A better fix would be to 727 patch target_mmap(), but it is more complicated as the file 728 size must be known */ 729 if (qemu_real_host_page_size < qemu_host_page_size) { 730 abi_ulong end_addr, end_addr1; 731 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) & 732 ~(qemu_real_host_page_size - 1); 733 end_addr = HOST_PAGE_ALIGN(elf_bss); 734 if (end_addr1 < end_addr) { 735 mmap((void *)g2h(end_addr1), end_addr - end_addr1, 736 PROT_READ|PROT_WRITE|PROT_EXEC, 737 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0); 738 } 739 } 740 741 nbyte = elf_bss & (qemu_host_page_size-1); 742 if (nbyte) { 743 nbyte = qemu_host_page_size - nbyte; 744 do { 745 /* FIXME - what to do if put_user() fails? */ 746 put_user_u8(0, elf_bss); 747 elf_bss++; 748 } while (--nbyte); 749 } 750 } 751 752 753 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, 754 struct elfhdr * exec, 755 abi_ulong load_addr, 756 abi_ulong load_bias, 757 abi_ulong interp_load_addr, int ibcs, 758 struct image_info *info) 759 { 760 abi_ulong sp; 761 int size; 762 abi_ulong u_platform; 763 const char *k_platform; 764 const int n = sizeof(elf_addr_t); 765 766 sp = p; 767 u_platform = 0; 768 k_platform = ELF_PLATFORM; 769 if (k_platform) { 770 size_t len = strlen(k_platform) + 1; 771 sp -= (len + n - 1) & ~(n - 1); 772 u_platform = sp; 773 /* FIXME - check return value of memcpy_to_target() for failure */ 774 memcpy_to_target(sp, k_platform, len); 775 } 776 /* 777 * Force 16 byte _final_ alignment here for generality. 778 */ 779 sp = sp &~ (abi_ulong)15; 780 size = (DLINFO_ITEMS + 1) * 2; 781 if (k_platform) 782 size += 2; 783 #ifdef DLINFO_ARCH_ITEMS 784 size += DLINFO_ARCH_ITEMS * 2; 785 #endif 786 size += envc + argc + 2; 787 size += (!ibcs ? 3 : 1); /* argc itself */ 788 size *= n; 789 if (size & 15) 790 sp -= 16 - (size & 15); 791 792 /* This is correct because Linux defines 793 * elf_addr_t as Elf32_Off / Elf64_Off 794 */ 795 #define NEW_AUX_ENT(id, val) do { \ 796 sp -= n; put_user_ual(val, sp); \ 797 sp -= n; put_user_ual(id, sp); \ 798 } while(0) 799 800 NEW_AUX_ENT (AT_NULL, 0); 801 802 /* There must be exactly DLINFO_ITEMS entries here. */ 803 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff)); 804 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); 805 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); 806 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); 807 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr)); 808 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); 809 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry); 810 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid()); 811 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); 812 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); 813 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); 814 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); 815 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); 816 if (k_platform) 817 NEW_AUX_ENT(AT_PLATFORM, u_platform); 818 #ifdef ARCH_DLINFO 819 /* 820 * ARCH_DLINFO must come last so platform specific code can enforce 821 * special alignment requirements on the AUXV if necessary (eg. PPC). 822 */ 823 ARCH_DLINFO; 824 #endif 825 #undef NEW_AUX_ENT 826 827 sp = loader_build_argptr(envc, argc, sp, p, !ibcs); 828 return sp; 829 } 830 831 832 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex, 833 int interpreter_fd, 834 abi_ulong *interp_load_addr) 835 { 836 struct elf_phdr *elf_phdata = NULL; 837 struct elf_phdr *eppnt; 838 abi_ulong load_addr = 0; 839 int load_addr_set = 0; 840 int retval; 841 abi_ulong last_bss, elf_bss; 842 abi_ulong error; 843 int i; 844 845 elf_bss = 0; 846 last_bss = 0; 847 error = 0; 848 849 #ifdef BSWAP_NEEDED 850 bswap_ehdr(interp_elf_ex); 851 #endif 852 /* First of all, some simple consistency checks */ 853 if ((interp_elf_ex->e_type != ET_EXEC && 854 interp_elf_ex->e_type != ET_DYN) || 855 !elf_check_arch(interp_elf_ex->e_machine)) { 856 return ~((abi_ulong)0UL); 857 } 858 859 860 /* Now read in all of the header information */ 861 862 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) 863 return ~(abi_ulong)0UL; 864 865 elf_phdata = (struct elf_phdr *) 866 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); 867 868 if (!elf_phdata) 869 return ~((abi_ulong)0UL); 870 871 /* 872 * If the size of this structure has changed, then punt, since 873 * we will be doing the wrong thing. 874 */ 875 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) { 876 free(elf_phdata); 877 return ~((abi_ulong)0UL); 878 } 879 880 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET); 881 if(retval >= 0) { 882 retval = read(interpreter_fd, 883 (char *) elf_phdata, 884 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); 885 } 886 if (retval < 0) { 887 perror("load_elf_interp"); 888 exit(-1); 889 free (elf_phdata); 890 return retval; 891 } 892 #ifdef BSWAP_NEEDED 893 eppnt = elf_phdata; 894 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { 895 bswap_phdr(eppnt); 896 } 897 #endif 898 899 if (interp_elf_ex->e_type == ET_DYN) { 900 /* in order to avoid hardcoding the interpreter load 901 address in qemu, we allocate a big enough memory zone */ 902 error = target_mmap(0, INTERP_MAP_SIZE, 903 PROT_NONE, MAP_PRIVATE | MAP_ANON, 904 -1, 0); 905 if (error == -1) { 906 perror("mmap"); 907 exit(-1); 908 } 909 load_addr = error; 910 load_addr_set = 1; 911 } 912 913 eppnt = elf_phdata; 914 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) 915 if (eppnt->p_type == PT_LOAD) { 916 int elf_type = MAP_PRIVATE | MAP_DENYWRITE; 917 int elf_prot = 0; 918 abi_ulong vaddr = 0; 919 abi_ulong k; 920 921 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 922 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 923 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 924 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) { 925 elf_type |= MAP_FIXED; 926 vaddr = eppnt->p_vaddr; 927 } 928 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr), 929 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr), 930 elf_prot, 931 elf_type, 932 interpreter_fd, 933 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr)); 934 935 if (error == -1) { 936 /* Real error */ 937 close(interpreter_fd); 938 free(elf_phdata); 939 return ~((abi_ulong)0UL); 940 } 941 942 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) { 943 load_addr = error; 944 load_addr_set = 1; 945 } 946 947 /* 948 * Find the end of the file mapping for this phdr, and keep 949 * track of the largest address we see for this. 950 */ 951 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz; 952 if (k > elf_bss) elf_bss = k; 953 954 /* 955 * Do the same thing for the memory mapping - between 956 * elf_bss and last_bss is the bss section. 957 */ 958 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr; 959 if (k > last_bss) last_bss = k; 960 } 961 962 /* Now use mmap to map the library into memory. */ 963 964 close(interpreter_fd); 965 966 /* 967 * Now fill out the bss section. First pad the last page up 968 * to the page boundary, and then perform a mmap to make sure 969 * that there are zeromapped pages up to and including the last 970 * bss page. 971 */ 972 padzero(elf_bss, last_bss); 973 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */ 974 975 /* Map the last of the bss segment */ 976 if (last_bss > elf_bss) { 977 target_mmap(elf_bss, last_bss-elf_bss, 978 PROT_READ|PROT_WRITE|PROT_EXEC, 979 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0); 980 } 981 free(elf_phdata); 982 983 *interp_load_addr = load_addr; 984 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr; 985 } 986 987 static int symfind(const void *s0, const void *s1) 988 { 989 struct elf_sym *key = (struct elf_sym *)s0; 990 struct elf_sym *sym = (struct elf_sym *)s1; 991 int result = 0; 992 if (key->st_value < sym->st_value) { 993 result = -1; 994 } else if (key->st_value > sym->st_value + sym->st_size) { 995 result = 1; 996 } 997 return result; 998 } 999 1000 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr) 1001 { 1002 #if ELF_CLASS == ELFCLASS32 1003 struct elf_sym *syms = s->disas_symtab.elf32; 1004 #else 1005 struct elf_sym *syms = s->disas_symtab.elf64; 1006 #endif 1007 1008 // binary search 1009 struct elf_sym key; 1010 struct elf_sym *sym; 1011 1012 key.st_value = orig_addr; 1013 1014 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind); 1015 if (sym != NULL) { 1016 return s->disas_strtab + sym->st_name; 1017 } 1018 1019 return ""; 1020 } 1021 1022 /* FIXME: This should use elf_ops.h */ 1023 static int symcmp(const void *s0, const void *s1) 1024 { 1025 struct elf_sym *sym0 = (struct elf_sym *)s0; 1026 struct elf_sym *sym1 = (struct elf_sym *)s1; 1027 return (sym0->st_value < sym1->st_value) 1028 ? -1 1029 : ((sym0->st_value > sym1->st_value) ? 1 : 0); 1030 } 1031 1032 /* Best attempt to load symbols from this ELF object. */ 1033 static void load_symbols(struct elfhdr *hdr, int fd) 1034 { 1035 unsigned int i, nsyms; 1036 struct elf_shdr sechdr, symtab, strtab; 1037 char *strings; 1038 struct syminfo *s; 1039 struct elf_sym *syms; 1040 1041 lseek(fd, hdr->e_shoff, SEEK_SET); 1042 for (i = 0; i < hdr->e_shnum; i++) { 1043 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) 1044 return; 1045 #ifdef BSWAP_NEEDED 1046 bswap_shdr(&sechdr); 1047 #endif 1048 if (sechdr.sh_type == SHT_SYMTAB) { 1049 symtab = sechdr; 1050 lseek(fd, hdr->e_shoff 1051 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET); 1052 if (read(fd, &strtab, sizeof(strtab)) 1053 != sizeof(strtab)) 1054 return; 1055 #ifdef BSWAP_NEEDED 1056 bswap_shdr(&strtab); 1057 #endif 1058 goto found; 1059 } 1060 } 1061 return; /* Shouldn't happen... */ 1062 1063 found: 1064 /* Now know where the strtab and symtab are. Snarf them. */ 1065 s = malloc(sizeof(*s)); 1066 syms = malloc(symtab.sh_size); 1067 if (!syms) 1068 return; 1069 s->disas_strtab = strings = malloc(strtab.sh_size); 1070 if (!s->disas_strtab) 1071 return; 1072 1073 lseek(fd, symtab.sh_offset, SEEK_SET); 1074 if (read(fd, syms, symtab.sh_size) != symtab.sh_size) 1075 return; 1076 1077 nsyms = symtab.sh_size / sizeof(struct elf_sym); 1078 1079 i = 0; 1080 while (i < nsyms) { 1081 #ifdef BSWAP_NEEDED 1082 bswap_sym(syms + i); 1083 #endif 1084 // Throw away entries which we do not need. 1085 if (syms[i].st_shndx == SHN_UNDEF || 1086 syms[i].st_shndx >= SHN_LORESERVE || 1087 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 1088 nsyms--; 1089 if (i < nsyms) { 1090 syms[i] = syms[nsyms]; 1091 } 1092 continue; 1093 } 1094 #if defined(TARGET_ARM) || defined (TARGET_MIPS) 1095 /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 1096 syms[i].st_value &= ~(target_ulong)1; 1097 #endif 1098 i++; 1099 } 1100 syms = realloc(syms, nsyms * sizeof(*syms)); 1101 1102 qsort(syms, nsyms, sizeof(*syms), symcmp); 1103 1104 lseek(fd, strtab.sh_offset, SEEK_SET); 1105 if (read(fd, strings, strtab.sh_size) != strtab.sh_size) 1106 return; 1107 s->disas_num_syms = nsyms; 1108 #if ELF_CLASS == ELFCLASS32 1109 s->disas_symtab.elf32 = syms; 1110 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx; 1111 #else 1112 s->disas_symtab.elf64 = syms; 1113 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx; 1114 #endif 1115 s->next = syminfos; 1116 syminfos = s; 1117 } 1118 1119 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs, 1120 struct image_info * info) 1121 { 1122 struct elfhdr elf_ex; 1123 struct elfhdr interp_elf_ex; 1124 struct exec interp_ex; 1125 int interpreter_fd = -1; /* avoid warning */ 1126 abi_ulong load_addr, load_bias; 1127 int load_addr_set = 0; 1128 unsigned int interpreter_type = INTERPRETER_NONE; 1129 unsigned char ibcs2_interpreter; 1130 int i; 1131 abi_ulong mapped_addr; 1132 struct elf_phdr * elf_ppnt; 1133 struct elf_phdr *elf_phdata; 1134 abi_ulong elf_bss, k, elf_brk; 1135 int retval; 1136 char * elf_interpreter; 1137 abi_ulong elf_entry, interp_load_addr = 0; 1138 int status; 1139 abi_ulong start_code, end_code, start_data, end_data; 1140 abi_ulong reloc_func_desc = 0; 1141 abi_ulong elf_stack; 1142 char passed_fileno[6]; 1143 1144 ibcs2_interpreter = 0; 1145 status = 0; 1146 load_addr = 0; 1147 load_bias = 0; 1148 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */ 1149 #ifdef BSWAP_NEEDED 1150 bswap_ehdr(&elf_ex); 1151 #endif 1152 1153 /* First of all, some simple consistency checks */ 1154 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) || 1155 (! elf_check_arch(elf_ex.e_machine))) { 1156 return -ENOEXEC; 1157 } 1158 1159 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p); 1160 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p); 1161 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p); 1162 if (!bprm->p) { 1163 retval = -E2BIG; 1164 } 1165 1166 /* Now read in all of the header information */ 1167 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum); 1168 if (elf_phdata == NULL) { 1169 return -ENOMEM; 1170 } 1171 1172 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET); 1173 if(retval > 0) { 1174 retval = read(bprm->fd, (char *) elf_phdata, 1175 elf_ex.e_phentsize * elf_ex.e_phnum); 1176 } 1177 1178 if (retval < 0) { 1179 perror("load_elf_binary"); 1180 exit(-1); 1181 free (elf_phdata); 1182 return -errno; 1183 } 1184 1185 #ifdef BSWAP_NEEDED 1186 elf_ppnt = elf_phdata; 1187 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) { 1188 bswap_phdr(elf_ppnt); 1189 } 1190 #endif 1191 elf_ppnt = elf_phdata; 1192 1193 elf_bss = 0; 1194 elf_brk = 0; 1195 1196 1197 elf_stack = ~((abi_ulong)0UL); 1198 elf_interpreter = NULL; 1199 start_code = ~((abi_ulong)0UL); 1200 end_code = 0; 1201 start_data = 0; 1202 end_data = 0; 1203 interp_ex.a_info = 0; 1204 1205 for(i=0;i < elf_ex.e_phnum; i++) { 1206 if (elf_ppnt->p_type == PT_INTERP) { 1207 if ( elf_interpreter != NULL ) 1208 { 1209 free (elf_phdata); 1210 free(elf_interpreter); 1211 close(bprm->fd); 1212 return -EINVAL; 1213 } 1214 1215 /* This is the program interpreter used for 1216 * shared libraries - for now assume that this 1217 * is an a.out format binary 1218 */ 1219 1220 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz); 1221 1222 if (elf_interpreter == NULL) { 1223 free (elf_phdata); 1224 close(bprm->fd); 1225 return -ENOMEM; 1226 } 1227 1228 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET); 1229 if(retval >= 0) { 1230 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz); 1231 } 1232 if(retval < 0) { 1233 perror("load_elf_binary2"); 1234 exit(-1); 1235 } 1236 1237 /* If the program interpreter is one of these two, 1238 then assume an iBCS2 image. Otherwise assume 1239 a native linux image. */ 1240 1241 /* JRP - Need to add X86 lib dir stuff here... */ 1242 1243 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 || 1244 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) { 1245 ibcs2_interpreter = 1; 1246 } 1247 1248 #if 0 1249 printf("Using ELF interpreter %s\n", elf_interpreter); 1250 #endif 1251 if (retval >= 0) { 1252 retval = open(path(elf_interpreter), O_RDONLY); 1253 if(retval >= 0) { 1254 interpreter_fd = retval; 1255 } 1256 else { 1257 perror(elf_interpreter); 1258 exit(-1); 1259 /* retval = -errno; */ 1260 } 1261 } 1262 1263 if (retval >= 0) { 1264 retval = lseek(interpreter_fd, 0, SEEK_SET); 1265 if(retval >= 0) { 1266 retval = read(interpreter_fd,bprm->buf,128); 1267 } 1268 } 1269 if (retval >= 0) { 1270 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */ 1271 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */ 1272 } 1273 if (retval < 0) { 1274 perror("load_elf_binary3"); 1275 exit(-1); 1276 free (elf_phdata); 1277 free(elf_interpreter); 1278 close(bprm->fd); 1279 return retval; 1280 } 1281 } 1282 elf_ppnt++; 1283 } 1284 1285 /* Some simple consistency checks for the interpreter */ 1286 if (elf_interpreter){ 1287 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; 1288 1289 /* Now figure out which format our binary is */ 1290 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) && 1291 (N_MAGIC(interp_ex) != QMAGIC)) { 1292 interpreter_type = INTERPRETER_ELF; 1293 } 1294 1295 if (interp_elf_ex.e_ident[0] != 0x7f || 1296 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) { 1297 interpreter_type &= ~INTERPRETER_ELF; 1298 } 1299 1300 if (!interpreter_type) { 1301 free(elf_interpreter); 1302 free(elf_phdata); 1303 close(bprm->fd); 1304 return -ELIBBAD; 1305 } 1306 } 1307 1308 /* OK, we are done with that, now set up the arg stuff, 1309 and then start this sucker up */ 1310 1311 { 1312 char * passed_p; 1313 1314 if (interpreter_type == INTERPRETER_AOUT) { 1315 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd); 1316 passed_p = passed_fileno; 1317 1318 if (elf_interpreter) { 1319 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p); 1320 bprm->argc++; 1321 } 1322 } 1323 if (!bprm->p) { 1324 if (elf_interpreter) { 1325 free(elf_interpreter); 1326 } 1327 free (elf_phdata); 1328 close(bprm->fd); 1329 return -E2BIG; 1330 } 1331 } 1332 1333 /* OK, This is the point of no return */ 1334 info->end_data = 0; 1335 info->end_code = 0; 1336 info->start_mmap = (abi_ulong)ELF_START_MMAP; 1337 info->mmap = 0; 1338 elf_entry = (abi_ulong) elf_ex.e_entry; 1339 1340 #if defined(CONFIG_USE_GUEST_BASE) 1341 /* 1342 * In case where user has not explicitly set the guest_base, we 1343 * probe here that should we set it automatically. 1344 */ 1345 if (!have_guest_base) { 1346 /* 1347 * Go through ELF program header table and find out whether 1348 * any of the segments drop below our current mmap_min_addr and 1349 * in that case set guest_base to corresponding address. 1350 */ 1351 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; 1352 i++, elf_ppnt++) { 1353 if (elf_ppnt->p_type != PT_LOAD) 1354 continue; 1355 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) { 1356 guest_base = HOST_PAGE_ALIGN(mmap_min_addr); 1357 break; 1358 } 1359 } 1360 } 1361 #endif /* CONFIG_USE_GUEST_BASE */ 1362 1363 /* Do this so that we can load the interpreter, if need be. We will 1364 change some of these later */ 1365 info->rss = 0; 1366 bprm->p = setup_arg_pages(bprm->p, bprm, info); 1367 info->start_stack = bprm->p; 1368 1369 /* Now we do a little grungy work by mmaping the ELF image into 1370 * the correct location in memory. At this point, we assume that 1371 * the image should be loaded at fixed address, not at a variable 1372 * address. 1373 */ 1374 1375 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) { 1376 int elf_prot = 0; 1377 int elf_flags = 0; 1378 abi_ulong error; 1379 1380 if (elf_ppnt->p_type != PT_LOAD) 1381 continue; 1382 1383 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ; 1384 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 1385 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 1386 elf_flags = MAP_PRIVATE | MAP_DENYWRITE; 1387 if (elf_ex.e_type == ET_EXEC || load_addr_set) { 1388 elf_flags |= MAP_FIXED; 1389 } else if (elf_ex.e_type == ET_DYN) { 1390 /* Try and get dynamic programs out of the way of the default mmap 1391 base, as well as whatever program they might try to exec. This 1392 is because the brk will follow the loader, and is not movable. */ 1393 /* NOTE: for qemu, we do a big mmap to get enough space 1394 without hardcoding any address */ 1395 error = target_mmap(0, ET_DYN_MAP_SIZE, 1396 PROT_NONE, MAP_PRIVATE | MAP_ANON, 1397 -1, 0); 1398 if (error == -1) { 1399 perror("mmap"); 1400 exit(-1); 1401 } 1402 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr); 1403 } 1404 1405 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr), 1406 (elf_ppnt->p_filesz + 1407 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)), 1408 elf_prot, 1409 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE), 1410 bprm->fd, 1411 (elf_ppnt->p_offset - 1412 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); 1413 if (error == -1) { 1414 perror("mmap"); 1415 exit(-1); 1416 } 1417 1418 #ifdef LOW_ELF_STACK 1419 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack) 1420 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr); 1421 #endif 1422 1423 if (!load_addr_set) { 1424 load_addr_set = 1; 1425 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset; 1426 if (elf_ex.e_type == ET_DYN) { 1427 load_bias += error - 1428 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr); 1429 load_addr += load_bias; 1430 reloc_func_desc = load_bias; 1431 } 1432 } 1433 k = elf_ppnt->p_vaddr; 1434 if (k < start_code) 1435 start_code = k; 1436 if (start_data < k) 1437 start_data = k; 1438 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; 1439 if (k > elf_bss) 1440 elf_bss = k; 1441 if ((elf_ppnt->p_flags & PF_X) && end_code < k) 1442 end_code = k; 1443 if (end_data < k) 1444 end_data = k; 1445 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; 1446 if (k > elf_brk) elf_brk = k; 1447 } 1448 1449 elf_entry += load_bias; 1450 elf_bss += load_bias; 1451 elf_brk += load_bias; 1452 start_code += load_bias; 1453 end_code += load_bias; 1454 start_data += load_bias; 1455 end_data += load_bias; 1456 1457 if (elf_interpreter) { 1458 if (interpreter_type & 1) { 1459 elf_entry = load_aout_interp(&interp_ex, interpreter_fd); 1460 } 1461 else if (interpreter_type & 2) { 1462 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd, 1463 &interp_load_addr); 1464 } 1465 reloc_func_desc = interp_load_addr; 1466 1467 close(interpreter_fd); 1468 free(elf_interpreter); 1469 1470 if (elf_entry == ~((abi_ulong)0UL)) { 1471 printf("Unable to load interpreter\n"); 1472 free(elf_phdata); 1473 exit(-1); 1474 return 0; 1475 } 1476 } 1477 1478 free(elf_phdata); 1479 1480 if (qemu_log_enabled()) 1481 load_symbols(&elf_ex, bprm->fd); 1482 1483 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd); 1484 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX); 1485 1486 #ifdef LOW_ELF_STACK 1487 info->start_stack = bprm->p = elf_stack - 4; 1488 #endif 1489 bprm->p = create_elf_tables(bprm->p, 1490 bprm->argc, 1491 bprm->envc, 1492 &elf_ex, 1493 load_addr, load_bias, 1494 interp_load_addr, 1495 (interpreter_type == INTERPRETER_AOUT ? 0 : 1), 1496 info); 1497 info->load_addr = reloc_func_desc; 1498 info->start_brk = info->brk = elf_brk; 1499 info->end_code = end_code; 1500 info->start_code = start_code; 1501 info->start_data = start_data; 1502 info->end_data = end_data; 1503 info->start_stack = bprm->p; 1504 1505 /* Calling set_brk effectively mmaps the pages that we need for the bss and break 1506 sections */ 1507 set_brk(elf_bss, elf_brk); 1508 1509 padzero(elf_bss, elf_brk); 1510 1511 #if 0 1512 printf("(start_brk) %x\n" , info->start_brk); 1513 printf("(end_code) %x\n" , info->end_code); 1514 printf("(start_code) %x\n" , info->start_code); 1515 printf("(end_data) %x\n" , info->end_data); 1516 printf("(start_stack) %x\n" , info->start_stack); 1517 printf("(brk) %x\n" , info->brk); 1518 #endif 1519 1520 if ( info->personality == PER_SVR4 ) 1521 { 1522 /* Why this, you ask??? Well SVr4 maps page 0 as read-only, 1523 and some applications "depend" upon this behavior. 1524 Since we do not have the power to recompile these, we 1525 emulate the SVr4 behavior. Sigh. */ 1526 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC, 1527 MAP_FIXED | MAP_PRIVATE, -1, 0); 1528 } 1529 1530 info->entry = elf_entry; 1531 1532 return 0; 1533 } 1534 1535 static int load_aout_interp(void * exptr, int interp_fd) 1536 { 1537 printf("a.out interpreter not yet supported\n"); 1538 return(0); 1539 } 1540 1541 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) 1542 { 1543 init_thread(regs, infop); 1544 } 1545