Lines matching "0" and "x23" in arch/arm64/kernel/head.S (line numbers are from that file)
40 #if (PAGE_OFFSET & 0x1fffff) != 0
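(0x1fffff is 2 MiB - 1, so this build-time check rejects a PAGE_OFFSET that is not 2 MiB aligned, the alignment the early block mappings assume.)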
62 .quad 0 // Image load offset from start of RAM, little-endian
65 .quad 0 // reserved
66 .quad 0 // reserved
67 .quad 0 // reserved
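Lines 62-67 are fields of the 64-byte arm64 Image header. A minimal C sketch of the layout, following Documentation/arch/arm64/booting.rst (the struct itself is illustrative; the field names are the documentation's):

    #include <stdint.h>

    struct arm64_image_header {
            uint32_t code0;       /* executable code */
            uint32_t code1;       /* executable code */
            uint64_t text_offset; /* image load offset, little-endian (line 62) */
            uint64_t image_size;  /* effective image size, little-endian */
            uint64_t flags;       /* informative flags, little-endian */
            uint64_t res2;        /* reserved (lines 65-67) */
            uint64_t res3;        /* reserved */
            uint64_t res4;        /* reserved */
            uint32_t magic;       /* 0x644d5241, "ARM\x64" */
            uint32_t res5;        /* reserved (holds the PE header offset) */
    };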
84 * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset
99 cbz x19, 0f
104 0: mov x0, x19
131 b.ne 0f
133 0:
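(The bare digits are GNU assembler local labels: "0f" branches forward to the next "0:" and "0b" backward to the most recent one, so the same digit can be reused all through the file, as it is throughout this listing.)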
136 tst x19, #SCTLR_ELx_C // Z := (C == 0)
170 cbnz x19, 0f // skip cache invalidation if MMU is on
174 add x1, x0, #0x20 // 4 x 8 bytes
176 0: str_l x19, mmu_enabled_at_boot, x0
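(Lines 170-176 close preserve_boot_args: x19 records whether the MMU, and with it the caches, were on at entry. With the MMU off, the just-stored boot arguments went straight to memory, so the 4 x 8 = 0x20 bytes they occupy are invalidated from the data cache to kill any stale lines; with the MMU on, the invalidation is skipped and the fact is recorded in mmu_enabled_at_boot.)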
265 mov \count, #0
318 bfi x1, xzr, #0, #PAGE_SHIFT - 3
402 bfi x22, x21, #0, #SWAPPER_BLOCK_SHIFT // remapped FDT address
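bfi Xd, Xn, #lsb, #width inserts the low "width" bits of Xn into Xd at bit "lsb" and leaves the rest of Xd intact. A small C model of the instruction and of these two uses (the helper name is mine):

    #include <stdint.h>

    /* Model of "bfi xd, xn, #lsb, #width". */
    static uint64_t bfi(uint64_t xd, uint64_t xn, unsigned lsb, unsigned width)
    {
            uint64_t mask = (width == 64 ? ~0ULL : (1ULL << width) - 1) << lsb;

            return (xd & ~mask) | ((xn << lsb) & mask);
    }

    /* Line 318 is bfi(x1, 0, 0, PAGE_SHIFT - 3): inserting from xzr just
     * clears the low PAGE_SHIFT - 3 bits (a page table holds
     * 2^(PAGE_SHIFT - 3) eight-byte entries).
     * Line 402 is bfi(x22, x21, 0, SWAPPER_BLOCK_SHIFT): the FDT's offset
     * within its swapper block (low bits of x21) is spliced into the
     * remapped base kept in x22. */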
414 cbnz x19, 0f // skip cache invalidation if MMU is on
420 0: ret x28
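(ret x28 returns through a copy of the link register that was saved at function entry: this code runs before a usable stack exists and makes bl calls of its own, which would clobber lr, so a spare register stands in for the stack. Line 414 again skips the invalidation when the MMU was already on at boot.)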
427 add x5, x5, x23 // add KASLR displacement
567 cbz x0, 0f
572 0:
791 add x11, x11, x23 // actual virtual offset
793 0: cmp x9, x10
798 b.ne 0b
799 add x14, x14, x23 // relocate
800 str x14, [x12, x23]
801 b 0b
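Lines 793-801 are the body of the RELA relocation loop: each R_AARCH64_RELATIVE entry has the displacement in x23 added to its addend, and the result is stored at the (equally displaced) target slot. A C sketch of the same logic, assuming the standard ELF entry layout (the function name is mine):

    #include <stdint.h>

    typedef struct {
            uint64_t r_offset; /* link-time address of the 64-bit slot to patch */
            uint64_t r_info;   /* relocation type in the low 32 bits */
            uint64_t r_addend; /* link-time value the slot should hold */
    } Elf64_Rela;

    #define R_AARCH64_RELATIVE 1027

    static void apply_rela(const Elf64_Rela *rela, const Elf64_Rela *end,
                           uint64_t disp) /* disp plays the role of x23 */
    {
            for (; rela < end; rela++) {
                    if ((uint32_t)rela->r_info != R_AARCH64_RELATIVE)
                            continue; /* the b.ne 0b at line 798 */
                    /* add x14, x14, x23 / str x14, [x12, x23] */
                    *(uint64_t *)(rela->r_offset + disp) =
                            rela->r_addend + disp;
            }
    }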
841 tbnz x11, #0, 3f // branch to handle bitmaps
842 add x13, x11, x23
844 add x12, x12, x23
851 tbz x11, #0, 5f // skip bit if not set
853 add x12, x12, x23
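Lines 841-853 decode RELR, the compressed relative-relocation format: an entry with bit 0 clear is an address entry (the tbnz at 841 branches off for bitmaps), while an entry with bit 0 set is a bitmap whose bits 1..63 each flag one of the 63 words following the last address entry (the tbz at 851 skips clear bits). A C sketch under the same assumptions as the RELA one above:

    #include <stdint.h>

    static void apply_relr(const uint64_t *relr, const uint64_t *end,
                           uint64_t disp) /* disp plays the role of x23 */
    {
            uint64_t *place = 0;

            for (; relr < end; relr++) {
                    uint64_t entry = *relr;

                    if (!(entry & 1)) {
                            /* Address entry: relocate the word it names and
                             * note where a following bitmap would continue. */
                            place = (uint64_t *)(entry + disp);
                            *place++ += disp;
                    } else {
                            /* Bitmap entry: bit n set means relocate the
                             * (n-1)th word after the last address entry. */
                            for (int i = 0; (entry >>= 1) != 0; i++)
                                    if (entry & 1)
                                            place[i] += disp;
                            place += 63;
                    }
            }
    }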
878 adrp x23, KERNEL_START
879 and x23, x23, MIN_KIMG_ALIGN - 1
888 orr x23, x23, x0 // record kernel offset
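Lines 878-888 compute the value x23 carries (the "physical misalignment/KASLR offset" of the line-84 comment): the kernel's runtime misalignment below MIN_KIMG_ALIGN, OR-ed with the seed-derived KASLR displacement arriving in x0. The displacement is a multiple of MIN_KIMG_ALIGN, so the two parts occupy disjoint bits and the orr cannot carry. A sketch (names are mine; the 2 MiB value of MIN_KIMG_ALIGN is assumed):

    #define MIN_KIMG_ALIGN 0x200000UL /* assumed: 2 MiB */

    static unsigned long kernel_offset(unsigned long runtime_base, /* adrp x23, KERNEL_START */
                                       unsigned long kaslr_disp)   /* x0: multiple of MIN_KIMG_ALIGN */
    {
            unsigned long misalign = runtime_base & (MIN_KIMG_ALIGN - 1); /* and x23, x23, ... */

            return misalign | kaslr_disp; /* orr x23, x23, x0 */
    }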