/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg gs
#else
#define __percpu_seg fs
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define PER_CPU_VAR(var) %__percpu_seg:var
#else /* ! SMP */
#define PER_CPU_VAR(var) var
#endif /* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var) var
#endif

#else /* ...!ASSEMBLY */

#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr) \
({ \
	unsigned long tcp_ptr__; \
	asm ("add " __percpu_arg(1) ", %0" \
	     : "=r" (tcp_ptr__) \
	     : "m" (this_cpu_off), "0" (ptr)); \
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
})
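
/*
 * Usage sketch (illustrative only; "demo_foo" is a made-up per-cpu
 * variable, not one defined in this header):
 *
 *	DEFINE_PER_CPU(struct foo, demo_foo);
 *	...
 *	struct foo *f = this_cpu_ptr(&demo_foo);
 *
 * On SMP, this_cpu_ptr() ends up in arch_raw_cpu_ptr() above: a single
 * "add %gs:this_cpu_off, <reg>" converts the per-cpu offset of demo_foo
 * into the address of the current CPU's instance, without first loading
 * the offset into a temporary register.
 */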
#else
#define __percpu_prefix ""
#endif

#define __percpu_arg(x) __percpu_prefix "%" #x
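
/*
 * Expansion sketch (for orientation; "demo_var" is a hypothetical
 * per-cpu variable): on an SMP x86-64 build, __percpu_arg([var])
 * produces the template string "%%gs:%[var]", so an asm template such
 * as "mov %%gs:%[var], %[val]" reaches the assembler as, roughly,
 * "mov %gs:demo_var(%rip), %eax". On UP builds the prefix is empty and
 * the access is a plain memory reference.
 */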

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor must use these macros to get the proper address offset
 * from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
#define init_per_cpu_var(var) var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)

#define percpu_to_op(size, qual, op, _var, _val) \
do { \
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
	if (0) { \
		typeof(_var) pto_tmp__; \
		pto_tmp__ = (_val); \
		(void)pto_tmp__; \
	} \
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \
	    : [var] "+m" (_var) \
	    : [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)

#define percpu_unary_op(size, qual, op, _var) \
({ \
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \
	    : [var] "+m" (_var)); \
})
/*
 * Generate a per-cpu add-to-memory instruction, optimizing the code to
 * use inc/dec when the value added or subtracted is 1 or -1.
 */
#define percpu_add_op(size, qual, var, val) \
do { \
	const int pao_ID__ = (__builtin_constant_p(val) && \
			      ((val) == 1 || (val) == -1)) ? \
				(int)(val) : 0; \
	if (0) { \
		typeof(var) pao_tmp__; \
		pao_tmp__ = (val); \
		(void)pao_tmp__; \
	} \
	if (pao_ID__ == 1) \
		percpu_unary_op(size, qual, "inc", var); \
	else if (pao_ID__ == -1) \
		percpu_unary_op(size, qual, "dec", var); \
	else \
		percpu_to_op(size, qual, "add", var, val); \
} while (0)
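
/*
 * Expansion sketch (illustrative; "demo_counter" is a hypothetical
 * 4-byte per-cpu variable, so the _4 size class and "l" suffix apply):
 *
 *	this_cpu_add(demo_counter, 1);	// roughly: incl %gs:demo_counter
 *	this_cpu_add(demo_counter, 7);	// roughly: addl $7, %gs:demo_counter
 *	this_cpu_sub(demo_counter, 1);	// roughly: decl %gs:demo_counter
 *
 * The constant +1/-1 cases take the inc/dec path above; everything else
 * goes through percpu_to_op() with "add".
 */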

#define percpu_from_op(size, qual, op, _var) \
({ \
	__pcpu_type_##size pfo_val__; \
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__) \
	    : [var] "m" (_var)); \
	(typeof(_var))(unsigned long) pfo_val__; \
})

#define percpu_stable_op(size, op, _var) \
({ \
	__pcpu_type_##size pfo_val__; \
	asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__) \
	    : [var] "p" (&(_var))); \
	(typeof(_var))(unsigned long) pfo_val__; \
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val) \
({ \
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \
				    __percpu_arg([var])) \
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__), \
		    [var] "+m" (_var) \
		  : : "memory"); \
	(typeof(_var))(unsigned long) (paro_tmp__ + _val); \
})
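
/*
 * Usage sketch (illustrative; "demo_seq" is a made-up per-cpu counter):
 *
 *	unsigned int id = this_cpu_add_return(demo_seq, 1);
 *
 * The xadd leaves the old value in the register operand, and the macro
 * returns old + val, i.e. the counter's value after the addition.
 */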

/*
 * this_cpu_xchg() is implemented using cmpxchg without a lock prefix.
 * A real xchg is expensive due to its implied lock prefix, which also
 * prevents the processor from prefetching cachelines.
 */
#define percpu_xchg_op(size, qual, _var, _nval) \
({ \
	__pcpu_type_##size pxo_old__; \
	__pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \
				    "%[oval]") \
		  "\n1:\t" \
		  __pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  "\n\tjnz 1b" \
		  : [oval] "=&a" (pxo_old__), \
		    [var] "+m" (_var) \
		  : [nval] __pcpu_reg_##size(, pxo_new__) \
		  : "memory"); \
	(typeof(_var))(unsigned long) pxo_old__; \
})
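
/*
 * Usage sketch (illustrative; "demo_state" and DEMO_IDLE are made up):
 *
 *	old = this_cpu_xchg(demo_state, DEMO_IDLE);
 *
 * This emits a mov to fetch the current value followed by a cmpxchg
 * retry loop, rather than a single xchg instruction, to avoid the
 * implied lock prefix described above.
 */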

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
({ \
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  : [oval] "+a" (pco_old__), \
		    [var] "+m" (_var) \
		  : [nval] __pcpu_reg_##size(, pco_new__) \
		  : "memory"); \
	(typeof(_var))(unsigned long) pco_old__; \
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval) \
({ \
	bool success; \
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__; \
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  CC_SET(z) \
		  : CC_OUT(z) (success), \
		    [oval] "+a" (pco_old__), \
		    [var] "+m" (_var) \
		  : [nval] __pcpu_reg_##size(, pco_new__) \
		  : "memory"); \
	if (unlikely(!success)) \
		*pco_oval__ = pco_old__; \
	likely(success); \
})
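
/*
 * Usage sketch (illustrative; "demo_val" and DEMO_FLAG are made up):
 *
 *	unsigned long old = this_cpu_read(demo_val), new;
 *	do {
 *		new = old | DEMO_FLAG;
 *	} while (!this_cpu_try_cmpxchg(demo_val, &old, new));
 *
 * On failure the macro writes the value it found back through the old
 * pointer, so the loop does not need to re-read the variable.
 */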

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
({ \
	union { \
		u64 var; \
		struct { \
			u32 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = _oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  : [var] "+m" (_var), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	\
	old__.var; \
})

#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval) \
({ \
	bool success; \
	u64 *_oval = (u64 *)(_ovalp); \
	union { \
		u64 var; \
		struct { \
			u32 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = *_oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  CC_SET(z) \
		  : CC_OUT(z) (success), \
		    [var] "+m" (_var), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	if (unlikely(!success)) \
		*_oval = old__.var; \
	likely(success); \
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
#endif

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval) \
({ \
	union { \
		u128 var; \
		struct { \
			u64 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = _oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  : [var] "+m" (_var), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	\
	old__.var; \
})

#define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval) \
({ \
	bool success; \
	u128 *_oval = (u128 *)(_ovalp); \
	union { \
		u128 var; \
		struct { \
			u64 low, high; \
		}; \
	} old__, new__; \
	\
	old__.var = *_oval; \
	new__.var = _nval; \
	\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  CC_SET(z) \
		  : CC_OUT(z) (success), \
		    [var] "+m" (_var), \
		    "+a" (old__.low), \
		    "+d" (old__.high) \
		  : "b" (new__.low), \
		    "c" (new__.high), \
		    "S" (&(_var)) \
		  : "memory"); \
	if (unlikely(!success)) \
		*_oval = old__.var; \
	likely(success); \
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
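
/*
 * Usage sketch (illustrative; "demo_pair" is a hypothetical 16-byte
 * per-cpu object and demo_make_pair() a made-up helper):
 *
 *	u128 old = 0, new;
 *	do {
 *		new = demo_make_pair(old);
 *	} while (!this_cpu_try_cmpxchg128(demo_pair, &old, new));
 *
 * A failed attempt updates "old" with the current 16-byte value. The
 * ALTERNATIVE() patches in the inline cmpxchg16b when the CPU has
 * X86_FEATURE_CX16 and otherwise calls this_cpu_cmpxchg16b_emu.
 */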
#endif

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus. The current users include
 * get_current() and get_thread_info(), both of which return values that
 * are really per-thread, merely implemented as per-cpu variables, and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)
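
/*
 * Sketch of the canonical user (the exact per-cpu variable name differs
 * between kernel versions, so treat this as illustrative only):
 *
 *	static __always_inline struct task_struct *get_current(void)
 *	{
 *		return this_cpu_read_stable(current_task);
 *	}
 *
 * Because the task pointer only changes at a context switch, letting the
 * compiler cache the loaded value within a function is safe here.
 */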

#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val)
#define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val)

/*
 * raw_cpu_xchg() can use a load-store since it is not required to be
 * IRQ-safe.
 */
#define raw_percpu_xchg_op(var, nval) \
({ \
	typeof(var) pxo_ret__ = raw_cpu_read(var); \
	raw_cpu_write(var, (nval)); \
	pxo_ret__; \
})

#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val)
#define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(1, volatile, pcp, nval)
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(2, volatile, pcp, nval)
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(4, volatile, pcp, nval)

#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available on 64-bit kernels.
 * 32-bit kernels must fall back to the generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a =
		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	bool oldbit;

	asm volatile("btl "__percpu_arg(2)",%1"
			CC_SET(c)
			: CC_OUT(c) (oldbit)
			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr) \
	(__builtin_constant_p((nr)) \
	 ? x86_this_cpu_constant_test_bit((nr), (addr)) \
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
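
/*
 * Usage sketch (illustrative; "demo_mask", DEMO_WORDS and DEMO_BIT are
 * made up):
 *
 *	DECLARE_PER_CPU(unsigned long, demo_mask[DEMO_WORDS]);
 *	...
 *	if (x86_this_cpu_test_bit(DEMO_BIT, demo_mask))
 *		...;
 *
 * With a compile-time constant bit number this reduces to a single
 * per-cpu read plus a mask; with a variable bit number it emits a bt
 * instruction against the per-cpu data.
 */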

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
	DEFINE_PER_CPU(_type, _name) = _initvalue; \
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
				{ [0 ... NR_CPUS-1] = _initvalue }; \
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
				{ [0 ... NR_CPUS-1] = _initvalue }; \
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
	DECLARE_PER_CPU(_type, _name); \
	extern __typeof__(_type) *_name##_early_ptr; \
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
	extern __typeof__(_type) *_name##_early_ptr; \
	extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
	*(early_per_cpu_ptr(_name) ? \
		&early_per_cpu_ptr(_name)[_cpu] : \
		&per_cpu(_name, _cpu))
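
/*
 * Usage sketch (the names are illustrative; the kernel's CPU-to-APIC-id
 * map is set up along these lines):
 *
 *	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, demo_cpu_to_id, DEMO_BAD_ID);
 *	...
 *	id = early_per_cpu(demo_cpu_to_id, cpu);
 *
 * Before the per-cpu areas exist, early_per_cpu() reads from the
 * NR_CPUS-sized __initdata map; once setup clears _name##_early_ptr,
 * the same expression falls through to per_cpu().
 */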

#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */