xref: /linux/arch/arm64/include/asm/barrier.h (revision 63eb28bb1402891b1ad2be02a530f29a9dd7f1cd)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

#include <asm/alternative-macros.h>

#define __nops(n)	".rept	" #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
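/*
 * Note (editorial): the two macros below appear to emit WFET and WFIT
 * (FEAT_WFxT) via their system-instruction encodings rather than by
 * mnemonic, presumably so the file still assembles with toolchains that
 * predate those instructions.
 */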
#define wfet(val)	asm volatile("msr s0_3_c1_c0_0, %0"	\
			     : : "r" (val) : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")
#define wfit(val)	asm volatile("msr s0_3_c1_c0_1, %0"	\
			     : : "r" (val) : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

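/*
 * Note (editorial): PSB CSYNC, TSB CSYNC and CSDB are emitted below via
 * their HINT immediates (#17, #18 and #20), which any assembler accepts
 * and which execute as NOPs on CPUs that do not implement the
 * corresponding features.
 */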
#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

/*
 * Data Gathering Hint:
 * This instruction prevents memory accesses with Normal-NC or Device-GRE
 * attributes that appear before the hint instruction from being merged
 * with any memory accesses appearing after the hint instruction.
 */
#define dgh()		asm volatile("hint #6" : : : "memory")

#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
						 SB_BARRIER_INSN"nop\n",	\
						 ARM64_HAS_SB))

#define gsb_ack()	asm volatile(GSB_ACK_BARRIER_INSN : : : "memory")
#define gsb_sys()	asm volatile(GSB_SYS_BARRIER_INSN : : : "memory")

#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
	do {							\
		asm volatile(					\
		ALTERNATIVE_CB("dsb sy",			\
			       ARM64_HAS_GIC_PRIO_RELAXED_SYNC,	\
			       alt_cb_patch_nops)		\
		);						\
	} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif
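
/*
 * Illustrative sketch (not part of this header): pmr_sync() is typically
 * used after a write to ICC_PMR_EL1 when interrupt priority masking stands
 * in for PSTATE.I, so the new priority mask takes effect before any
 * following instructions, roughly:
 *
 *	gic_write_pmr(GIC_PRIO_IRQON);
 *	pmr_sync();
 */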

#define __mb()		dsb(sy)
#define __rmb()		dsb(ld)
#define __wmb()		dsb(st)

#define __dma_mb()	dmb(osh)
#define __dma_rmb()	dmb(oshld)
#define __dma_wmb()	dmb(oshst)

#define io_stop_wc()	dgh()
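
/*
 * Illustrative sketch (not part of this header): io_stop_wc() is meant to
 * be called after a batch of writes through a write-combining mapping when
 * those writes must not be merged with later, unrelated writes. With a
 * hypothetical WC-mapped region wc_base and descriptor desc:
 *
 *	memcpy_toio(wc_base, desc, sizeof(*desc));
 *	io_stop_wc();
 */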

#define tsb_csync()								\
	do {									\
		/*								\
		 * CPUs affected by Arm Erratum 2054223 or 2067961 need		\
		 * another TSB to ensure the trace is flushed. The barriers	\
		 * don't have to be strictly back to back, as long as the	\
		 * CPU is in trace prohibited state.				\
		 */								\
		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))	\
			__tsb_csync();						\
		__tsb_csync();							\
	} while (0)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}
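
/*
 * Illustrative sketch (not part of this header): the generic
 * array_index_nospec() helper uses this mask to clamp an untrusted index
 * under speculation, along the lines of (idx, size and array are
 * placeholder names):
 *
 *	if (idx < size) {
 *		idx &= array_index_mask_nospec(idx, size);
 *		val = array[idx];
 *	}
 */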

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
 */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
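
/*
 * Illustrative sketch (not part of this header): the macro fakes an address
 * dependency from the counter value to a dummy load off the stack, so the
 * counter read is ordered like a memory read. A counter read typically
 * feeds its result straight into it, roughly:
 *
 *	u64 cnt = read_sysreg(cntvct_el0);
 *	arch_counter_enforce_ordering(cnt);
 */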

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "rZ" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "rZ" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "rZ" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %x1, %0"				\
				: "=Q" (*__p)				\
				: "rZ" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})
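
/*
 * Illustrative sketch (not part of this header): the generic
 * smp_store_release() and smp_load_acquire() wrappers pair these into a
 * publish/consume pattern. With a hypothetical shared structure:
 *
 *	// producer
 *	data->payload = compute();
 *	smp_store_release(&data->ready, 1);
 *
 *	// consumer
 *	if (smp_load_acquire(&data->ready))
 *		use(data->payload);
 */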

#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
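
/*
 * Illustrative sketch (not part of this header): callers spin on a
 * condition over *ptr, with VAL naming the most recently loaded value; on
 * arm64 the retry path can wait in WFE via __cmpwait_relaxed(). With a
 * hypothetical shared word:
 *
 *	// wait, without ordering, for another CPU to publish a non-zero value
 *	val = smp_cond_load_relaxed(&shared->state, VAL != 0);
 */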

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
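
/*
 * Illustrative sketch (not part of this header): the acquire variant also
 * orders the final load before everything that follows it, which is the
 * shape used when waiting for a lock word to be released. With a
 * hypothetical lock structure:
 *
 *	// spin until the owner clears ->locked, then enter the critical section
 *	smp_cond_load_acquire(&lock->locked, !VAL);
 */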

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */