/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA(PAGE_SIZE)
 *	RW_DATA(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *	ELF_DETAILS
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#include <asm-generic/codetag.lds.h>

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

/*
 * Only some architectures want to have the .notes segment visible in
 * a separate PT_NOTE ELF Program Header. When this happens, it needs
 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
 * Program Headers. In this case, though, the PT_LOAD needs to be made
 * the default again so that all the following sections don't also end
 * up in the PT_NOTE Program Header.
 */
#ifdef EMITS_PT_NOTE
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
#else
#define NOTES_HEADERS
#define NOTES_HEADERS_RESTORE
#endif

/*
 * Some architectures have non-executable read-only exception tables.
 * They can be added to the RO_DATA segment by specifying their desired
 * alignment.
 */
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#else
#define RO_EXCEPTION_TABLE
#endif

/* Align . to the function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(CONFIG_FUNCTION_ALIGNMENT)

/*
 * Support -ffunction-sections by matching .text and .text.*,
 * but exclude '.text..*', .text.startup[.*], and .text.exit[.*].
 *
 * .text.startup and .text.startup.* are matched later by INIT_TEXT, and
 * .text.exit and .text.exit.* are matched later by EXIT_TEXT, so they must be
 * explicitly excluded here.
 *
 * Other .text.* sections that are typically grouped separately, such as
 * .text.unlikely or .text.hot, must be matched explicitly before using
 * TEXT_MAIN.
 *
 * NOTE: builds *with* and *without* -ffunction-sections are both supported by
 * this single macro. Even with -ffunction-sections, there may be some objects
 * NOT compiled with the flag due to the use of a specific Makefile override
 * like cflags-y or AUTOFDO_PROFILE_foo.o. So this single catchall rule is
 * needed to support mixed object builds.
 *
 * One implication is that functions named startup(), exit(), split(),
 * unlikely(), hot(), and unknown() are not allowed in the kernel due to the
 * ambiguity of their section names with -ffunction-sections. For example,
 * .text.startup could be __attribute__((constructor)) code in a *non*
 * ffunction-sections object, which should be placed in .init.text; or it could
 * be an actual function named startup() in an ffunction-sections object, which
 * should be placed in .text. The build will detect and complain about any such
 * ambiguously named functions.
 *
 * The character-class patterns below spell out "everything except the excluded
 * prefixes", one prefix character at a time.
 */
#define TEXT_MAIN							\
		.text							\
		.text.[_0-9A-Za-df-rt-z]*				\
		.text.s[_0-9A-Za-su-z]* .text.s .text.s.*		\
		.text.st[_0-9A-Zb-z]* .text.st .text.st.*		\
		.text.sta[_0-9A-Za-qs-z]* .text.sta .text.sta.*		\
		.text.star[_0-9A-Za-su-z]* .text.star .text.star.*	\
		.text.start[_0-9A-Za-tv-z]* .text.start .text.start.*	\
		.text.startu[_0-9A-Za-oq-z]* .text.startu .text.startu.* \
		.text.startup[_0-9A-Za-z]*				\
		.text.e[_0-9A-Za-wy-z]* .text.e .text.e.*		\
		.text.ex[_0-9A-Za-hj-z]* .text.ex .text.ex.*		\
		.text.exi[_0-9A-Za-su-z]* .text.exi .text.exi.*		\
		.text.exit[_0-9A-Za-z]*

/*
 * Support -fdata-sections by matching .data, .data.*, and others,
 * but exclude '.data..*'.
 */
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*

/*
 * GCC 4.5 and later have a 32 bytes section alignment for structures.
 * Except GCC 4.9, that feels the need to align on 64 bytes.
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/*
 * The order of the sched class addresses is important, as it is
 * used to determine the order of the priority of each sched class in
 * relation to each other.
 */
#define SCHED_DATA				\
	STRUCT_ALIGN();				\
	__sched_class_highest = .;		\
	*(__stop_sched_class)			\
	*(__dl_sched_class)			\
	*(__rt_sched_class)			\
	*(__fair_sched_class)			\
	*(__ext_sched_class)			\
	*(__idle_sched_class)			\
	__sched_class_lowest = .;

/* The actual configuration determines if the init/exit sections
 * are handled as text/data or they can be discarded (which
 * often happens at runtime)
 */

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
#define KEEP_PATCHABLE		KEEP(*(__patchable_function_entries))
#define PATCHABLE_DISCARDS
#else
#define KEEP_PATCHABLE
#define PATCHABLE_DISCARDS	*(__patchable_function_entries)
#endif

#ifndef CONFIG_ARCH_SUPPORTS_CFI
/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
#define FTRACE_STUB_HACK	ftrace_stub_graph = ftrace_stub;
#else
#define FTRACE_STUB_HACK
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * The ftrace call sites are logged to a section whose name depends on the
 * compiler option used. A given kernel image will only use one, AKA
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 *
 * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
 * as some archs will have a different prototype for that function
 * but ftrace_ops_list_func() will have a single prototype.
 */
#define MCOUNT_REC()	. = ALIGN(8);				\
			__start_mcount_loc = .;			\
			KEEP(*(__mcount_loc))			\
			KEEP_PATCHABLE				\
			__stop_mcount_loc = .;			\
			FTRACE_STUB_HACK			\
			ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
# define MCOUNT_REC()	FTRACE_STUB_HACK			\
			ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
# endif
#endif

/*
 * Bounded sections: wrap KEEP()ed input sections with begin/end symbols so C
 * code can iterate the table.  The PRE/POST variants differ only in whether
 * the _BEGIN_/_END_ text is prepended or appended to the label.
 */
#define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)	\
	_BEGIN_##_label_ = .;						\
	KEEP(*(_sec_))							\
	_END_##_label_ = .;

#define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)	\
	_label_##_BEGIN_ = .;						\
	KEEP(*(_sec_))							\
	_label_##_END_ = .;

#define BOUNDED_SECTION_BY(_sec_, _label_)				\
	BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)

#define BOUNDED_SECTION(_sec)	 BOUNDED_SECTION_BY(_sec, _sec)

/* Like the bounded variants, but also pull in matching .gnu.linkonce input. */
#define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
	_HDR_##_label_ = .;						\
	KEEP(*(.gnu.linkonce.##_sec_))					\
	BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)

#define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
	_label_##_HDR_ = .;						\
	KEEP(*(.gnu.linkonce.##_sec_))					\
	BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)

#define HEADERED_SECTION_BY(_sec_, _label_)				\
	HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)

#define HEADERED_SECTION(_sec)	 HEADERED_SECTION_BY(_sec, _sec)

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()						\
	BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile)
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()					\
	BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile)
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()				\
	. = ALIGN(8);					\
	BOUNDED_SECTION(_kprobe_blacklist)
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST()			\
	STRUCT_ALIGN();					\
	BOUNDED_SECTION(_error_injection_whitelist)
#else
#define ERROR_INJECT_WHITELIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()							\
	. = ALIGN(8);							\
	BOUNDED_SECTION(_ftrace_events)					\
	BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps)
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()		BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt)
#define TRACEPOINT_STR()	BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str)
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS()					\
	. = ALIGN(8);						\
	BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata)
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN();				\
	BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp)
#else
#define BPF_RAW_TP()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE()						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end)
#else
#define EARLYCON_TABLE()
#endif

#ifdef CONFIG_SECURITY
#define LSM_TABLE()							\
	. = ALIGN(8);							\
	BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end)

#define EARLY_LSM_TABLE()						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end)
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
#endif

/*
 * Device-tree match tables: OF_TABLE(cfg, name) expands to a bounded
 * __<name>_of_table only when IS_ENABLED(cfg) evaluates to 1 (the two-level
 * indirection forces expansion of IS_ENABLED before token pasting).
 */
#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	__##name##_of_table = .;					\
	KEEP(*(__##name##_of_table))					\
	KEEP(*(__##name##_of_table_end))

#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table,		\
				   __##name##_acpi_probe_table,, _end)
#else
#define ACPI_PROBE_TABLE(name)
#endif

#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name)						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table,		\
				   __##name##_thermal_table,, _end)
#else
#define THERMAL_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	__dtb_start = .;						\
	KEEP(*(.dtb.init.rodata))					\
	__dtb_end = .;

/*
 * .data section
 */
#define DATA_DATA							\
	*(.xiptext)							\
	*(DATA_MAIN)							\
	*(.data..decrypted)						\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	*(.data..unlikely)						\
	__start_once = .;						\
	*(.data..once)							\
	__end_once = .;							\
	*(.data..do_once)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes)		\
	BOUNDED_SECTION_BY(__dyndbg, ___dyndbg)				\
	CODETAG_SECTIONS()						\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	BPF_RAW_TP()							\
	TRACEPOINT_STR()						\
	KUNIT_TABLE()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_begin = .;						\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	__nosave_end = .;

#define CACHE_HOT_DATA(align)						\
	. = ALIGN(align);						\
	*(SORT_BY_ALIGNMENT(.data..hot.*))				\
	. = ALIGN(align);

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)						\
	. = ALIGN(page_align);

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	__start_init_stack = .;						\
	init_thread_union = .;						\
	init_stack = .;							\
	KEEP(*(.data..init_thread_info))				\
	. = __start_init_stack + THREAD_SIZE;				\
	__end_init_stack = .;

#define JUMP_TABLE_DATA							\
	. = ALIGN(8);							\
	BOUNDED_SECTION_BY(__jump_table, ___jump_table)

#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
#define STATIC_CALL_DATA						\
	. = ALIGN(8);							\
	BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites)	\
	BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key)
#else
#define STATIC_CALL_DATA
#endif

/*
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA						\
	. = ALIGN(8);							\
	__start_ro_after_init = .;					\
	*(.data..ro_after_init)						\
	JUMP_TABLE_DATA							\
	STATIC_CALL_DATA						\
	__end_ro_after_init = .;
#endif

/*
 * .kcfi_traps contains a list of KCFI trap locations.
 */
#ifndef KCFI_TRAPS
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
#define KCFI_TRAPS							\
	__kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps)		\
	}
#else
#define KCFI_TRAPS
#endif
#endif

/*
 * Read only Data
 */
#define RO_DATA(align)							\
	. = ALIGN((align));						\
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {			\
		__start_rodata = .;					\
		*(.rodata) *(.rodata.*) *(.data.rel.ro*)		\
		SCHED_DATA						\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		. = ALIGN(8);						\
		BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) {			\
		*(.rodata1)						\
	}								\
									\
	/* PCI quirks */						\
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early, _pci_fixups_early, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final, _pci_fixups_final, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \
		BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \
	}								\
									\
	FW_LOADER_BUILT_IN_DATA						\
	TRACEDATA							\
									\
	PRINTK_INDEX							\
									\
	/* Kernel symbol table */					\
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {			\
		__start___ksymtab = .;					\
		KEEP(*(SORT(___ksymtab+*)))				\
		__stop___ksymtab = .;					\
	}								\
									\
	/* Kernel symbol CRC table */					\
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {			\
		__start___kcrctab = .;					\
		KEEP(*(SORT(___kcrctab+*)))				\
		__stop___kcrctab = .;					\
	}								\
									\
	/* Kernel symbol flags table */					\
	__kflagstab : AT(ADDR(__kflagstab) - LOAD_OFFSET) {		\
		__start___kflagstab = .;				\
		KEEP(*(SORT(___kflagstab+*)))				\
		__stop___kflagstab = .;					\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		BOUNDED_SECTION_BY(__param, ___param)			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		BOUNDED_SECTION_BY(__modver, ___modver)			\
	}								\
									\
	KCFI_TRAPS							\
									\
	RO_EXCEPTION_TABLE						\
	NOTES								\
	BTF								\
									\
	. = ALIGN((align));						\
	__end_rodata = .;


/*
 * Non-instrumentable text section
 */
#define NOINSTR_TEXT							\
		ALIGN_FUNCTION();					\
		__noinstr_text_start = .;				\
		*(.noinstr.text)					\
		__cpuidle_text_start = .;				\
		*(.cpuidle.text)					\
		__cpuidle_text_end = .;					\
		__noinstr_text_end = .;

#define TEXT_SPLIT							\
		__split_text_start = .;					\
		*(.text.split .text.split.[0-9a-zA-Z_]*)		\
		__split_text_end = .;

#define TEXT_UNLIKELY							\
		__unlikely_text_start = .;				\
		*(.text.unlikely .text.unlikely.*)			\
		__unlikely_text_end = .;

#define TEXT_HOT							\
		__hot_text_start = .;					\
		*(.text.hot .text.hot.*)				\
		__hot_text_end = .;

/*
 * .text section. Map to function alignment to avoid address changes
 * during second ld run in second ld pass when generating System.map
 *
 * TEXT_MAIN here will match symbols with a fixed pattern (for example,
 * .text.hot or .text.unlikely). Match those before TEXT_MAIN to ensure
 * they get grouped together.
 *
 * Also placing .text.hot section at the beginning of a page, this
 * would help the TLB performance.
 */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.asan.* .text.tsan.*)				\
		*(.text.unknown .text.unknown.*)			\
		TEXT_SPLIT						\
		TEXT_UNLIKELY						\
		. = ALIGN(PAGE_SIZE);					\
		TEXT_HOT						\
		*(TEXT_MAIN .text.fixup)				\
		NOINSTR_TEXT						\
		*(.ref.text)

/* sched.text is aligned to function alignment to secure we have same
 * address even at second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		__sched_text_start = .;					\
		*(.sched.text)						\
		__sched_text_end = .;

/* spinlock.text is aligned to function alignment to secure we have same
 * address even at second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		__lock_text_start = .;					\
		*(.spinlock.text)					\
		__lock_text_end = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		__kprobes_text_start = .;				\
		*(.kprobes.text)					\
		__kprobes_text_end = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__entry_text_start = .;					\
		*(.entry.text)						\
		__entry_text_end = .;

#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		__irqentry_text_start = .;				\
		*(.irqentry.text)					\
		__irqentry_text_end = .;

#define SOFTIRQENTRY_TEXT						\
		ALIGN_FUNCTION();					\
		__softirqentry_text_start = .;				\
		*(.softirqentry.text)					\
		__softirqentry_text_end = .;

#define STATIC_CALL_TEXT						\
		ALIGN_FUNCTION();					\
		__static_call_text_start = .;				\
		*(.static_call.text)					\
		__static_call_text_end = .;

/* Section used for early init (in .S files) */
#define HEAD_TEXT  KEEP(*(.head.text))

#define HEAD_TEXT_SECTION							\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(__ex_table, ___ex_table)		\
	}

/*
 * .BTF
 */
#ifdef CONFIG_DEBUG_INFO_BTF
#define BTF								\
	. = ALIGN(PAGE_SIZE);						\
	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {				\
		BOUNDED_SECTION_BY(.BTF, _BTF)				\
	}								\
	. = ALIGN(PAGE_SIZE);						\
	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {			\
		*(.BTF_ids)						\
	}
#else
#define BTF
#endif

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			__ctors_start = .;		   \
			KEEP(*(SORT(.ctors.*)))		   \
			KEEP(*(.ctors))			   \
			KEEP(*(SORT(.init_array.*)))	   \
			KEEP(*(.init_array))		   \
			__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	KEEP(*(SORT(___kentry+*)))					\
	*(.init.data .init.data.*)					\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata .init.rodata.*)					\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	ERROR_INJECT_WHITELIST()					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	TIMER_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(timer)						\
	THERMAL_TABLE(governor)						\
	EARLYCON_TABLE()						\
	LSM_TABLE()							\
	EARLY_LSM_TABLE()						\
	KUNIT_INIT_TABLE()

#define INIT_TEXT							\
	*(.init.text .init.text.*)					\
	*(.text.startup .text.startup.*)

#define EXIT_DATA							\
	*(.exit.data .exit.data.*)					\
	*(.fini_array .fini_array.*)					\
	*(.dtors .dtors.*)

#define EXIT_TEXT							\
	*(.exit.text)							\
	*(.text.exit .text.exit.*)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.dynsbss)						\
		*(SBSS_MAIN)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		. = ALIGN(PAGE_SIZE);					\
		*(.bss..page_aligned)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.dynbss)						\
		*(BSS_MAIN)						\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		.debug_pubtypes 0 : { *(.debug_pubtypes) }		\
		/* DWARF 3 */						\
		.debug_ranges	0 : { *(.debug_ranges) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
		/* GNU DWARF 2 extensions */				\
		.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }	\
		.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }	\
		/* DWARF 4 */						\
		.debug_types	0 : { *(.debug_types) }			\
		/* DWARF 5 */						\
		.debug_addr	0 : { *(.debug_addr) }			\
		.debug_line_str	0 : { *(.debug_line_str) }		\
		.debug_loclists	0 : { *(.debug_loclists) }		\
		.debug_macro	0 : { *(.debug_macro) }			\
		.debug_names	0 : { *(.debug_names) }			\
		.debug_rnglists	0 : { *(.debug_rnglists) }		\
		.debug_str_offsets	0 : { *(.debug_str_offsets) }

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }

/* Required sections not related to debugging. */
#define ELF_DETAILS							\
		.comment 0 : { *(.comment) }				\
		.symtab 0 : { *(.symtab) }				\
		.strtab 0 : { *(.strtab) }				\
		.shstrtab 0 : { *(.shstrtab) }

#define MODINFO								\
	.modinfo : { *(.modinfo) . = ALIGN(8); }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(__bug_table, ___bug_table)		\
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE						\
	.orc_header : AT(ADDR(.orc_header) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.orc_header, _orc_header)		\
	}								\
	. = ALIGN(4);							\
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {	\
		BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip)	\
	}								\
	. = ALIGN(2);							\
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind)		\
	}								\
	text_size = _etext - _stext;					\
	. = ALIGN(4);							\
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {		\
		orc_lookup = .;						\
		. += (((text_size + LOOKUP_BLOCK_SIZE - 1) /		\
			LOOKUP_BLOCK_SIZE) + 1) * 4;			\
		orc_lookup_end = .;					\
	}
#else
#define ORC_UNWIND_TABLE
#endif

/* Built-in firmware blobs */
#ifdef CONFIG_FW_LOADER
#define FW_LOADER_BUILT_IN_DATA						\
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) {	\
		BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \
	}
#else
#define FW_LOADER_BUILT_IN_DATA
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \
	}
#else
#define TRACEDATA
#endif

#ifdef CONFIG_PRINTK_INDEX
#define PRINTK_INDEX							\
	.printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) {		\
		BOUNDED_SECTION_BY(.printk_index, _printk_index)	\
	}
#else
#define PRINTK_INDEX
#endif

/*
 * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
 * Otherwise, the type of .notes section would become PROGBITS instead of NOTES.
 *
 * Also, discard .note.gnu.property, otherwise it forces the notes section to
 * be 8-byte aligned which causes alignment mismatches with the kernel's custom
 * 4-byte aligned notes.
 */
#define NOTES								\
	/DISCARD/ : {							\
		*(.note.GNU-stack)					\
		*(.note.gnu.property)					\
	}								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		BOUNDED_SECTION_BY(.note.*, _notes)			\
	} NOTES_HEADERS							\
	NOTES_HEADERS_RESTORE

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end)

#define INIT_CALLS_LEVEL(level)						\
		__initcall##level##_start = .;				\
		KEEP(*(.initcall##level##.init))			\
		KEEP(*(.initcall##level##s.init))			\

#define INIT_CALLS							\
		__initcall_start = .;					\
		KEEP(*(.initcallearly.init))				\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		__initcall_end = .;

#define CON_INITCALL							\
	BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)

#define NAMED_SECTION(name) \
	. = ALIGN(8); \
	name : AT(ADDR(name) - LOAD_OFFSET) \
	{ BOUNDED_SECTION_PRE_LABEL(name, name, __start_, __stop_) }

#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)

#define RUNTIME_CONST_VARIABLES						\
		RUNTIME_CONST(shift, d_hash_shift)			\
		RUNTIME_CONST(ptr, dentry_hashtable)			\
		RUNTIME_CONST(ptr, __dentry_cache)			\
		RUNTIME_CONST(ptr, __names_cache)			\
		RUNTIME_CONST(ptr, __filp_cache)			\
		RUNTIME_CONST(ptr, __bfilp_cache)

/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE()							\
		. = ALIGN(8);						\
		BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)

/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_INIT_TABLE()						\
		. = ALIGN(8);						\
		BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites,	\
				__kunit_init_suites, _start, _end)

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	__initramfs_start = .;						\
	KEEP(*(.init.ramfs))						\
	. = ALIGN(8);							\
	KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif

/*
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION					\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..decrypted)					\
	. = ALIGN(PAGE_SIZE);
#else
#define PERCPU_DECRYPTED_SECTION
#endif


/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
 */
#ifdef RUNTIME_DISCARD_EXIT
#define EXIT_DISCARDS
#else
#define EXIT_DISCARDS							\
	EXIT_TEXT							\
	EXIT_DATA
#endif

/*
 * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
 * -fsanitize=thread produce unwanted sections (.eh_frame
 * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
 * keep any .init_array.* sections.
 * https://llvm.org/pr46478
 */
#ifdef CONFIG_UNWIND_TABLES
#define DISCARD_EH_FRAME
#else
#define DISCARD_EH_FRAME	*(.eh_frame)
#endif
#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
# ifdef CONFIG_CONSTRUCTORS
#  define SANITIZER_DISCARDS						\
	DISCARD_EH_FRAME
# else
#  define SANITIZER_DISCARDS						\
	*(.init_array) *(.init_array.*)					\
	DISCARD_EH_FRAME
# endif
#else
# define SANITIZER_DISCARDS
#endif

#define COMMON_DISCARDS							\
	SANITIZER_DISCARDS						\
	PATCHABLE_DISCARDS						\
	*(.discard)							\
	*(.discard.*)							\
	*(.export_symbol)						\
	*(.no_trim_symbol)						\
	/* ld.bfd warns about .gnu.version* even when not emitted */	\
	*(.gnu.version*)						\
	*(__tracepoint_check)						\

#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_DISCARDS							\
	EXIT_CALL							\
	COMMON_DISCARDS							\
	}

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	__per_cpu_start = .;						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	__per_cpu_hot_start = .;					\
	*(SORT_BY_ALIGNMENT(.data..percpu..hot.*))			\
	__per_cpu_hot_end = .;						\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	PERCPU_DECRYPTED_SECTION					\
	__per_cpu_end = .;

/**
 * PERCPU_SECTION - define output section for percpu area
 * @cacheline: cacheline size
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typical/always less than a PAGE_SIZE so
 * the sections that have this restriction (or similar)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA(cacheline, pagealigned, inittask)			\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHE_HOT_DATA(cacheline)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}								\
	BUG_TABLE							\

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		_sinittext = .;						\
		INIT_TEXT						\
		_einittext = .;						\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	__bss_start = .;						\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	__bss_stop = .;