/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull		lsr
#define push		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull		lsl
#define push		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb	sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb	sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm

	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)			\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)			\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
9997:	instr					;\
	.if . - 9997b != 4			;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif					;\
	.popsection
#define ALT_UP_B(label)				\
	.equ	up_b_offset, label - 9998b	;\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	W(b)	. + up_b_offset			;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb)
	.else
	ALT_SMP(W(dmb))
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#ifdef CONFIG_THUMB2_KERNEL
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

#endif /* __ASM_ASSEMBLER_H__ */
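/*
 * Usage sketch (kept inside a comment; not part of the original header).
 * The routine name and register choices below are hypothetical.  It shows
 * how a caller of the strusr/ldrusr helpers above supplies the 9001 fixup
 * label that their default abort argument (9001f) refers to: the faulting
 * user access is recorded in __ex_table, and the fault handler resumes
 * execution at the 9001 label.
 *
 *	ENTRY(put_word_user)		@ r0 = user pointer, r1 = value
 *		strusr	r1, r0, 4	@ user-mode store, may fault -> 9001
 *		mov	r0, #0		@ success
 *		mov	pc, lr
 *	9001:	mvn	r0, #13		@ faulted: return -EFAULT (~13 == -14)
 *		mov	pc, lr
 *	ENDPROC(put_word_user)
 */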