Lines Matching +full:0 +full:x4000000
49 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
71 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
72 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to
74 MMAP_PAGE_ZERO = 0x0100000,
75 ADDR_COMPAT_LAYOUT = 0x0200000,
76 READ_IMPLIES_EXEC = 0x0400000,
77 ADDR_LIMIT_32BIT = 0x0800000,
78 SHORT_INODE = 0x1000000,
79 WHOLE_SECONDS = 0x2000000,
80 STICKY_TIMEOUTS = 0x4000000,
81 ADDR_LIMIT_3GB = 0x8000000,
91 PER_LINUX = 0x0000,
92 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
93 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
94 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
95 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
96 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
97 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
98 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
99 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
100 PER_BSD = 0x0006,
101 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
102 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
103 PER_LINUX32 = 0x0008,
104 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
105 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
106 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
107 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
108 PER_RISCOS = 0x000c,
109 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
110 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
111 PER_OSF4 = 0x000f, /* OSF/1 v4 */
112 PER_HPUX = 0x0010,
113 PER_MASK = 0x00ff,
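
The personality values above pack a base OS emulation type into the low byte and independent behaviour flags into the high bits; PER_MASK selects the base. A minimal standalone sketch of that decomposition, using only constants copied from the listing (not the real QEMU/Linux definitions):

    /* Illustrative only: constants copied from the listing above. */
    #include <stdio.h>

    #define STICKY_TIMEOUTS 0x4000000
    #define MMAP_PAGE_ZERO  0x0100000
    #define PER_MASK        0x00ff
    #define PER_SVR4        (0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO)

    int main(void)
    {
        unsigned int per = PER_SVR4;

        printf("base personality: 0x%04x\n", per & PER_MASK);  /* 0x0001 */
        printf("sticky timeouts:  %s\n", (per & STICKY_TIMEOUTS) ? "yes" : "no");
        printf("map page zero:    %s\n", (per & MMAP_PAGE_ZERO) ? "yes" : "no");
        return 0;
    }
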
128 #define MAP_DENYWRITE 0
178 regs->rax = 0; in init_thread()
195 (*regs)[0] = tswapreg(env->regs[15]); in elf_core_copy_regs()
212 (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff); in elf_core_copy_regs()
215 (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff); in elf_core_copy_regs()
216 (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff); in elf_core_copy_regs()
217 (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff); in elf_core_copy_regs()
218 (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff); in elf_core_copy_regs()
219 (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff); in elf_core_copy_regs()
220 (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff); in elf_core_copy_regs()
221 (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff); in elf_core_copy_regs()
234 if (reserved_va != 0 && in init_guest_commpage()
269 elf_platform[1] = '0' + family; in get_elf_platform()
286 A value of 0 tells we have no such handler. */ in init_thread()
287 regs->edx = 0; in init_thread()
302 (*regs)[0] = tswapreg(env->regs[R_EBX]); in elf_core_copy_regs()
309 (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff); in elf_core_copy_regs()
310 (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff); in elf_core_copy_regs()
311 (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff); in elf_core_copy_regs()
312 (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff); in elf_core_copy_regs()
315 (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff); in elf_core_copy_regs()
318 (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff); in elf_core_copy_regs()
331 } while (0)
355 memset(regs, 0, sizeof(*regs)); in init_thread()
361 regs->uregs[15] = infop->entry & 0xfffffffe; in init_thread()
367 regs->uregs[0] = 0; in init_thread()
386 regs->uregs[8] = 0; in init_thread()
397 (*regs)[0] = tswapreg(env->regs[0]); in elf_core_copy_regs()
415 (*regs)[17] = tswapreg(env->regs[0]); /* XXX */ in elf_core_copy_regs()
423 ARM_HWCAP_ARM_SWP = 1 << 0,
454 ARM_HWCAP2_ARM_AES = 1 << 0,
465 #define HI_COMMPAGE (intptr_t)0xffff0f00u
488 -1, 0); in init_guest_commpage()
498 /* Set kernel helper versions; rest of page is 0. */ in init_guest_commpage()
499 __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu)); in init_guest_commpage()
517 uint32_t hwcaps = 0; in get_elf_hwcap()
526 do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) in get_elf_hwcap()
529 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) in get_elf_hwcap()
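
The do { ... } while (0) wrapper in the hwcap helper macros above (and in the similar PPC, MIPS and s390 helpers further down) is the standard C idiom for making a multi-statement macro behave as one statement. A self-contained sketch of the pattern; has_feature() and the FEAT_/HWCAP_ names here are hypothetical, not QEMU's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { FEAT_FOO, FEAT_BAR };                      /* hypothetical features */
    enum { HWCAP_FOO = 1 << 0, HWCAP_BAR = 1 << 1 };  /* hypothetical hwcap bits */

    static bool has_feature(int feat)
    {
        return feat == FEAT_FOO;   /* pretend only FOO is present */
    }

    /* do { ... } while (0) makes the expansion a single statement, so
     * "if (x) GET_FEATURE(...); else ..." parses as the caller expects. */
    #define GET_FEATURE(feat, hwcap) \
        do { if (has_feature(feat)) { hwcaps |= (hwcap); } } while (0)

    int main(void)
    {
        uint32_t hwcaps = 0;

        GET_FEATURE(FEAT_FOO, HWCAP_FOO);
        GET_FEATURE(FEAT_BAR, HWCAP_BAR);
        printf("hwcaps = 0x%x\n", hwcaps);   /* prints 0x1 */
        return 0;
    }
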
569 uint64_t hwcaps = 0; in get_elf_hwcap2()
698 memset(regs, 0, sizeof(*regs)); in init_thread()
700 regs->pc = infop->entry & ~0x3ULL; in init_thread()
712 for (i = 0; i < 32; i++) { in elf_core_copy_regs()
723 ARM_HWCAP_A64_FP = 1 << 0,
772 ARM_HWCAP2_A64_DCPODP = 1 << 0,
842 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
847 uint32_t hwcaps = 0; in get_elf_hwcap()
887 uint64_t hwcaps = 0; in get_elf_hwcap2()
1093 r |= features & CPU_FEATURE_FSMULD ? HWCAP_SPARC_FSMULD : 0; in get_elf_hwcap()
1094 r |= features & CPU_FEATURE_VIS1 ? HWCAP_SPARC_VIS : 0; in get_elf_hwcap()
1095 r |= features & CPU_FEATURE_VIS2 ? HWCAP_SPARC_VIS2 : 0; in get_elf_hwcap()
1096 r |= features & CPU_FEATURE_FMAF ? HWCAP_SPARC_FMAF : 0; in get_elf_hwcap()
1097 r |= features & CPU_FEATURE_VIS3 ? HWCAP_SPARC_VIS3 : 0; in get_elf_hwcap()
1098 r |= features & CPU_FEATURE_IMA ? HWCAP_SPARC_IMA : 0; in get_elf_hwcap()
1110 regs->y = 0; in init_thread()
1138 QEMU_PPC_FEATURE_32 = 0x80000000,
1139 QEMU_PPC_FEATURE_64 = 0x40000000,
1140 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
1141 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
1142 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
1143 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
1144 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
1145 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
1146 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
1147 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
1148 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
1149 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
1150 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
1151 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
1152 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
1153 QEMU_PPC_FEATURE_CELL = 0x00010000,
1154 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
1155 QEMU_PPC_FEATURE_SMT = 0x00004000,
1156 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
1157 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
1158 QEMU_PPC_FEATURE_PA6T = 0x00000800,
1159 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
1160 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
1161 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
1162 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
1163 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
1165 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
1166 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
1169 QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
1170 QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
1171 QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
1172 QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
1173 QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
1174 QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
1175 QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
1176 QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
1177 QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
1178 QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
1179 QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
1180 QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
1181 QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
1182 QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
1183 QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
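
QEMU reports these bits to the guest through the ELF auxiliary vector (AT_HWCAP/AT_HWCAP2), where a program can read them with glibc's getauxval(). A small consumer-side sketch, assuming a PowerPC guest; the constant is defined locally with the value listed above rather than taken from kernel headers:

    /* Reads AT_HWCAP2 via getauxval(3); sketch only. */
    #include <stdio.h>
    #include <sys/auxv.h>

    /* Matches QEMU_PPC_FEATURE2_ARCH_3_00 from the listing. */
    #define PPC_FEATURE2_ARCH_3_00 0x00800000

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("ISA 3.00 support: %s\n",
               (hwcap2 & PPC_FEATURE2_ARCH_3_00) ? "yes" : "no");
        return 0;
    }
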
1191 uint32_t features = 0; in get_elf_hwcap()
1196 do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) in get_elf_hwcap()
1202 } while (0) in get_elf_hwcap()
1227 uint32_t features = 0; in get_elf_hwcap2()
1230 do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) in get_elf_hwcap2()
1232 do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0) in get_elf_hwcap2()
1252 * - keep the final alignment of sp (sp & 0xf)
1271 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
1272 } while (0)
1298 target_ulong ccr = 0; in elf_core_copy_regs()
1300 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { in elf_core_copy_regs()
1340 /*Set crmd PG,DA = 1,0 */ in init_thread()
1351 TARGET_EF_R0 = 0,
1361 (*regs)[TARGET_EF_R0] = 0; in elf_core_copy_regs()
1378 HWCAP_LOONGARCH_CPUCFG = (1 << 0),
1396 uint32_t hwcaps = 0; in get_elf_hwcap()
1447 { return _base_platform; } } while (0)
1489 TARGET_EF_R0 = 0,
1508 for (i = 0; i < TARGET_EF_R0; i++) { in elf_core_copy_regs()
1509 (*regs)[i] = 0; in elf_core_copy_regs()
1511 (*regs)[TARGET_EF_R0] = 0; in elf_core_copy_regs()
1517 (*regs)[TARGET_EF_R26] = 0; in elf_core_copy_regs()
1518 (*regs)[TARGET_EF_R27] = 0; in elf_core_copy_regs()
1519 (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]); in elf_core_copy_regs()
1520 (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]); in elf_core_copy_regs()
1532 HWCAP_MIPS_R6 = (1 << 0),
1552 do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)
1555 do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)
1562 } while (0)
1567 uint32_t hwcaps = 0; in get_elf_hwcap()
1608 int i, pos = 0; in elf_core_copy_regs()
1610 for (i = 0; i < 32; i++) { in elf_core_copy_regs()
1616 (*regs)[pos++] = 0; in elf_core_copy_regs()
1618 (*regs)[pos++] = 0; in elf_core_copy_regs()
1649 for (i = 0; i < 32; i++) { in elf_core_copy_regs()
1655 #define ELF_HWCAP 0
1693 for (i = 0; i < 16; i++) { in elf_core_copy_regs()
1703 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */ in elf_core_copy_regs()
1710 SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */
1711 SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */
1712 SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
1713 SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */
1714 SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */
1715 SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */
1716 SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */
1717 SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */
1718 SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */
1719 SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */
1727 uint32_t hwcap = 0; in get_elf_hwcap()
1752 regs->sr = 0; in init_thread()
1762 (*regs)[0] = tswapreg(env->dregs[1]); in elf_core_copy_regs()
1769 (*regs)[7] = tswapreg(env->aregs[0]); in elf_core_copy_regs()
1776 (*regs)[14] = tswapreg(env->dregs[0]); in elf_core_copy_regs()
1778 (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */ in elf_core_copy_regs()
1781 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */ in elf_core_copy_regs()
1817 do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)
1886 TARGET_REG_PSWM = 0,
1901 for (i = 0; i < 16; i++) { in elf_core_copy_regs()
1905 for (i = 0; i < 16; i++) { in elf_core_copy_regs()
1908 (*regs)[TARGET_REG_ORIG_R2] = 0; in elf_core_copy_regs()
1960 #define STACK_GROWS_DOWN 0
1968 regs->iaoq[0] = infop->entry | PRIV_USER; in init_thread()
1969 regs->iaoq[1] = regs->iaoq[0] + 4; in init_thread()
1970 regs->gr[23] = 0; in init_thread()
1978 #define LO_COMMPAGE 0
1982 /* If reserved_va, then we have already mapped 0 page on the host. */ in init_guest_commpage()
1988 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); in init_guest_commpage()
2020 regs->windowbase = 0; in init_thread()
2067 for (i = 0; i < env->config->nareg; ++i) { in elf_core_copy_regs()
2112 #define ELF_HWCAP 0
2165 #define ARCH_USE_GNU_PROPERTY 0
2182 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
2222 for (int i = 0; i < phnum; ++i, ++phdr) { in bswap_phdr()
2240 for (int i = 0; i < shnum; ++i, ++shdr) { in bswap_shdr()
2325 return 0; /* bullet-proofing */ in copy_elf_strings()
2330 for (i = argc - 1; i >= 0; --i) { in copy_elf_strings()
2340 return 0; in copy_elf_strings()
2351 if (offset == 0) { in copy_elf_strings()
2363 for (i = 0; i < argc; ++i) { in copy_elf_strings()
2371 return 0; in copy_elf_strings()
2383 if (remaining == 0) { in copy_elf_strings()
2423 guard = 0; in setup_arg_pages()
2430 error = target_mmap(0, size + guard, prot, in setup_arg_pages()
2431 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); in setup_arg_pages()
2495 memset(g2h_untagged(start_bss), 0, align_bss - start_bss); in zero_bss()
2501 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { in zero_bss()
2522 return 0; in elf_is_fdpic()
2535 put_user_u32(loadsegs[n].addr, sp+0); in loader_build_fdpic_loadmap()
2542 put_user_u16(0, sp+0); /* version */ in loader_build_fdpic_loadmap()
2581 info->interpreter_loadmap_addr = 0; in create_elf_tables()
2582 info->interpreter_pt_dynamic_addr = 0; in create_elf_tables()
2586 u_base_platform = 0; in create_elf_tables()
2602 u_platform = 0; in create_elf_tables()
2688 } while(0) in create_elf_tables()
2704 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); in create_elf_tables()
2705 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); in create_elf_tables()
2730 NEW_AUX_ENT (AT_NULL, 0); in create_elf_tables()
2741 for (i = 0; i < argc; ++i) { in create_elf_tables()
2746 put_user_ual(0, u_argv); in create_elf_tables()
2749 for (i = 0; i < envc; ++i) { in create_elf_tables()
2754 put_user_ual(0, u_envp); in create_elf_tables()
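
The NEW_AUX_ENT calls above build the ELF auxiliary vector: type/value pairs placed after envp on the new stack and terminated by an AT_NULL entry. A minimal sketch of how a program can walk that vector using the standard layout from <link.h>/<elf.h> (not QEMU internals); it assumes environ still points at the initial stack:

    #include <link.h>     /* ElfW(), pulls in <elf.h> for AT_NULL */
    #include <stdio.h>

    extern char **environ;

    int main(void)
    {
        char **p = environ;

        /* The auxv begins just past the NULL that terminates envp. */
        while (*p) {
            p++;
        }
        ElfW(auxv_t) *auxv = (ElfW(auxv_t) *)(p + 1);

        for (; auxv->a_type != AT_NULL; auxv++) {
            printf("type %lu value 0x%lx\n",
                   (unsigned long)auxv->a_type,
                   (unsigned long)auxv->a_un.a_val);
        }
        return 0;
    }
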
2762 #define HI_COMMPAGE 0
2764 #define HI_COMMPAGE 0
2778 * return 0 if it is not available to map, and -1 on mmap error.
2786 MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0); in pgb_try_mmap()
2790 return errno == EEXIST ? 0 : -1; in pgb_try_mmap()
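
pgb_try_mmap() above probes whether an address range is free by mapping it with MAP_FIXED_NOREPLACE and treating EEXIST as "already occupied". A standalone sketch of that probing idea (not the QEMU function; MAP_FIXED_NOREPLACE needs Linux >= 4.17, and the probe address is arbitrary):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    /* 1: range was free and is now reserved; 0: something was already
     * mapped there; -1: other mmap error. */
    static int try_reserve(void *addr, size_t size)
    {
        void *p = mmap(addr, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE |
                       MAP_FIXED_NOREPLACE, -1, 0);
        if (p != MAP_FAILED) {
            return 1;
        }
        return errno == EEXIST ? 0 : -1;
    }

    int main(void)
    {
        int r = try_reserve((void *)0x10000, 0x1000);  /* example address only */
        printf("reserve at 0x10000: %d\n", r);
        return 0;
    }
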
2814 return 0; in pgb_try_mmap_skip_brk()
2826 * On success, retain the mapping at index 0 for reserved_va.
2836 for (int i = ga->nbounds - 1; i >= 0; --i) { in pgb_try_mmap_set()
2837 if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base, in pgb_try_mmap_set()
2839 brk, i == 0 && reserved_va) <= 0) { in pgb_try_mmap_set()
2868 if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) { in pgb_addr_set()
2873 memset(ga, 0, sizeof(*ga)); in pgb_addr_set()
2874 n = 0; in pgb_addr_set()
2877 ga->bounds[n][0] = try_identity ? mmap_min_addr : 0; in pgb_addr_set()
2880 /* LO_COMMPAGE and NULL handled by reserving from 0. */ in pgb_addr_set()
2884 ga->bounds[n][0] = 0; in pgb_addr_set()
2888 ga->bounds[n][0] = 0; in pgb_addr_set()
2895 ga->bounds[n][0] = guest_loaddr; in pgb_addr_set()
2904 * due to comparison between unsigned and (possible) 0. in pgb_addr_set()
2911 ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask(); in pgb_addr_set()
2934 uintptr_t brk = (uintptr_t)sbrk(0); in pgb_fixed()
2938 "host minimum alignment (0x%" PRIxPTR ")\n", in pgb_fixed()
2977 for (int i = ga->nbounds - 1; i >= 0; --i) { in pgb_try_itree()
2978 uintptr_t s = base + ga->bounds[i][0]; in pgb_try_itree()
2993 return 0; /* success */ in pgb_try_itree()
3009 if (skip == 0) { in pgb_find_itree()
3037 brk = (uintptr_t)sbrk(0); in pgb_dynamic()
3038 if (pgb_try_mmap_set(&ga, 0, brk)) { in pgb_dynamic()
3039 guest_base = 0; in pgb_dynamic()
3053 brk = (uintptr_t)sbrk(0); in pgb_dynamic()
3077 for (int i = 0; i < ga.nbounds; ++i) { in pgb_dynamic()
3078 error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n", in pgb_dynamic()
3079 w, (uint64_t)ga.bounds[i][0], in pgb_dynamic()
3097 "address space (0x%" PRIx64 " > 0x%lx)", in probe_guest_base()
3104 "than the host can provide (0x%" PRIx64 ")", in probe_guest_base()
3124 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); in probe_guest_base()
3128 /* The string "GNU\0" as a magic number. */
3154 pr_type = data[0]; in parse_elf_property()
3251 prev_type = 0; in parse_elf_properties()
3290 if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) { in load_elf_image()
3311 info->nsegs = 0; in load_elf_image()
3312 info->pt_dynamic_addr = 0; in load_elf_image()
3320 loaddr = -1, hiaddr = 0; in load_elf_image()
3321 align = 0; in load_elf_image()
3323 for (i = 0; i < ehdr->e_phnum; ++i) { in load_elf_image()
3349 if (interp_name[eppnt->p_filesz - 1] != 0) { in load_elf_image()
3379 probe_guest_base(image_name, 0, hiaddr - loaddr); in load_elf_image()
3425 (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), in load_elf_image()
3426 -1, 0); in load_elf_image()
3451 for (i = 0; i < ehdr->e_phnum; ++i) { in load_elf_image()
3472 info->end_code = 0; in load_elf_image()
3474 info->end_data = 0; in load_elf_image()
3493 && (pinterp_name == NULL || *pinterp_name == 0) in load_elf_image()
3499 for (i = 0; i < ehdr->e_phnum; i++) { in load_elf_image()
3503 int elf_prot = 0; in load_elf_image()
3526 if (eppnt->p_filesz != 0) { in load_elf_image()
3572 if (info->end_data == 0) { in load_elf_image()
3605 if (fd < 0) { in load_elf_interp()
3612 if (retval < 0) { in load_elf_interp()
3655 for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) { in load_elf_vdso()
3677 int result = 0; in symfind()
3713 : ((sym0->st_value > sym1->st_value) ? 1 : 0); in symcmp()
3720 int i, shnum, nsyms, sym_idx = 0, str_idx = 0; in load_symbols()
3735 for (i = 0; i < shnum; ++i) { in load_symbols()
3775 for (i = 0; i < nsyms; ) { in load_symbols()
3795 if (nsyms == 0) { in load_symbols()
3841 offset = lseek(fd, 0, SEEK_SET); in get_elf_eflags()
3843 return 0; in get_elf_eflags()
3847 return 0; in get_elf_eflags()
3851 return 0; in get_elf_eflags()
3856 return 0; in get_elf_eflags()
3862 return 0; in get_elf_eflags()
3882 memset(&interp_info, 0, sizeof(interp_info)); in load_elf_binary()
3942 if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 in load_elf_binary()
3943 || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) { in load_elf_binary()
3946 /* Why this, you ask??? Well SVr4 maps page 0 as read-only, in load_elf_binary()
3950 target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC, in load_elf_binary()
3952 -1, 0); in load_elf_binary()
3968 abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE, in load_elf_binary()
3970 MAP_PRIVATE | MAP_ANON, -1, 0); in load_elf_binary()
3998 return 0; in load_elf_binary()
4135 return 0; in vma_dump_size()
4145 memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) { in vma_dump_size()
4146 return 0; in vma_dump_size()
4227 .pr_sid = getsid(0), in fill_prstatus_note()
4246 .pr_sid = getsid(0), in fill_prpsinfo_note()
4256 for (size_t i = 0; i < len; i++) { in fill_prpsinfo_note()
4257 if (psinfo.pr_psargs[i] == 0) { in fill_prpsinfo_note()
4301 bytes_written = 0; in dump_write()
4310 if (bytes_written < 0) { in dump_write()
4314 } else if (bytes_written == 0) { /* eof */ in dump_write()
4319 } while (bytes_left > 0); in dump_write()
4321 return (0); in dump_write()
4331 page_unprotect(NULL, start, 0); in wmr_page_unprotect_regions()
4338 return 0; in wmr_page_unprotect_regions()
4353 return 0; in wmr_count_and_size_regions()
4369 phdr->p_paddr = 0; in wmr_fill_region_phdr()
4374 phdr->p_flags = (flags & PAGE_READ ? PF_R : 0) in wmr_fill_region_phdr()
4375 | (flags & PAGE_WRITE_ORG ? PF_W : 0) in wmr_fill_region_phdr()
4376 | (flags & PAGE_EXEC ? PF_X : 0); in wmr_fill_region_phdr()
4381 return 0; in wmr_fill_region_phdr()
4391 return 0; in wmr_write_region()
4404 * 0 +----------------------+ \
4431 * Function returns 0 in case of success, negative errno otherwise.
4451 if (prctl(PR_GET_DUMPABLE) == 0) { in elf_core_dump()
4452 return 0; in elf_core_dump()
4455 if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) { in elf_core_dump()
4456 return 0; in elf_core_dump()
4469 memset(&css, 0, sizeof(css)); in elf_core_dump()
4472 cpus = 0; in elf_core_dump()
4490 errno = 0; in elf_core_dump()
4499 if (fd < 0) { in elf_core_dump()
4516 fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0); in elf_core_dump()
4539 fill_prstatus_note(dptr, cpu_iter, cpu_iter == cpu ? signr : 0); in elf_core_dump()
4542 if (dump_write(fd, header, data_offset) < 0) { in elf_core_dump()
4550 if (walk_memory_regions(&fd, wmr_write_region) < 0) { in elf_core_dump()
4553 errno = 0; in elf_core_dump()
4559 if (fd >= 0) { in elf_core_dump()
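
The PR_GET_DUMPABLE and RLIMIT_CORE checks near the top of elf_core_dump() decide whether a core file should be written at all. A standalone sketch of the same guard logic using the ordinary prctl(2)/getrlimit(2) interfaces; it mirrors the checks shown above but is not the QEMU function:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <sys/resource.h>

    static bool core_dump_allowed(void)
    {
        struct rlimit dumpsize;

        /* A process marked non-dumpable writes no core. */
        if (prctl(PR_GET_DUMPABLE) == 0) {
            return false;
        }
        /* A zero RLIMIT_CORE soft limit also suppresses the dump. */
        if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        printf("core dump allowed: %s\n", core_dump_allowed() ? "yes" : "no");
        return 0;
    }
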