/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
# define __percpu_seg		gs
# define __percpu_rel		(%rip)
#else
# define __percpu_seg		fs
# define __percpu_rel
#endif

#ifdef __ASSEMBLER__

#ifdef CONFIG_SMP
# define __percpu		%__percpu_seg:
#else
# define __percpu
#endif

#define PER_CPU_VAR(var)	__percpu(var)__percpu_rel
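
/*
 * Illustrative only: on a 64-bit SMP build, PER_CPU_VAR() combines the
 * %gs segment override with RIP-relative addressing, so an assembly use
 * such as
 *
 *	movq	PER_CPU_VAR(var), %rax
 *
 * assembles as "movq %gs:(var)(%rip), %rax".
 */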

#else /* !__ASSEMBLER__: */

#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
# define __seg_gs		__attribute__((address_space(__seg_gs)))
# define __seg_fs		__attribute__((address_space(__seg_fs)))
#endif

#define __percpu_seg_override	CONCATENATE(__seg_, __percpu_seg)
#define __percpu_prefix		""

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_seg_override
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"

#endif /* CONFIG_CC_HAS_NAMED_AS */

#define __force_percpu_prefix	"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 *
 * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr)						\
({									\
	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off);	\
									\
	tcp_ptr__ += (__force unsigned long)(_ptr);			\
	(TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__;		\
})
#else
#define arch_raw_cpu_ptr(_ptr)						\
({									\
	BUILD_BUG();							\
	(TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0;			\
})
#endif
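
/*
 * Illustrative sketch of the expansion above (64-bit SMP): the local CPU's
 * per-CPU offset is fetched from this_cpu_off with a single %gs-relative
 * MOV and added to the per-CPU address, roughly
 *
 *	movq	%gs:this_cpu_off(%rip), %rax
 *	addq	$var, %rax
 *
 * whereas the generic __my_cpu_offset has to go through
 * per_cpu_offset(raw_smp_processor_id()) and a temporary register.
 */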

#define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel

#else /* !CONFIG_SMP: */

#define __percpu_seg_override
#define __percpu_prefix		""
#define __force_percpu_prefix	""

#define PER_CPU_VAR(var)	(var)__percpu_rel

#endif /* CONFIG_SMP */

#if defined(CONFIG_USE_X86_SEG_SUPPORT) && defined(USE_TYPEOF_UNQUAL)
# define __my_cpu_type(var)	typeof(var)
# define __my_cpu_ptr(ptr)	(ptr)
# define __my_cpu_var(var)	(var)

# define __percpu_qual		__percpu_seg_override
#else
# define __my_cpu_type(var)	typeof(var) __percpu_seg_override
# define __my_cpu_ptr(ptr)	(__my_cpu_type(*(ptr)) *)(__force uintptr_t)(ptr)
# define __my_cpu_var(var)	(*__my_cpu_ptr(&(var)))
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x
#define __force_percpu_arg(x)	__force_percpu_prefix "%" #x

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1		u8
#define __pcpu_type_2		u16
#define __pcpu_type_4		u32
#define __pcpu_type_8		u64

#define __pcpu_cast_1(val)	((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val)	((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val)	((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val)	((u64)(val))

#define __pcpu_op_1(op)		op "b "
#define __pcpu_op_2(op)		op "w "
#define __pcpu_op_4(op)		op "l "
#define __pcpu_op_8(op)		op "q "

#define __pcpu_reg_1(mod, x)	mod "q" (x)
#define __pcpu_reg_2(mod, x)	mod "r" (x)
#define __pcpu_reg_4(mod, x)	mod "r" (x)
#define __pcpu_reg_8(mod, x)	mod "r" (x)

#define __pcpu_reg_imm_1(x)	"qi" (x)
#define __pcpu_reg_imm_2(x)	"ri" (x)
#define __pcpu_reg_imm_4(x)	"ri" (x)
#define __pcpu_reg_imm_8(x)	"re" (x)
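
/*
 * A minimal sketch of how the helpers above combine, for a 4-byte operand
 * and the "%%gs:" prefix used without CONFIG_CC_HAS_NAMED_AS:
 *
 *	__pcpu_op_4("mov") __percpu_arg([var]) ", %[val]"
 *
 * stringifies to
 *
 *	"movl %%gs:%[var], %[val]"
 *
 * with the destination constrained by __pcpu_reg_4("=", pfo_val__),
 * i.e. any general-purpose register ("=r").
 */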

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read(size, qual, pcp)					\
({									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp));		\
})

#define __raw_cpu_write(size, qual, pcp, val)				\
do {									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val);	\
} while (0)

#define __raw_cpu_read_const(pcp)	__raw_cpu_read(, , pcp)

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm qual (__pcpu_op_##size("mov")				\
		  __percpu_arg([var]) ", %[val]"			\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (__my_cpu_var(_var)));				\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define __raw_cpu_write(size, qual, _var, _val)				\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {							\
		TYPEOF_UNQUAL(_var) pto_tmp__;				\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual (__pcpu_op_##size("mov") "%[val], "			\
		  __percpu_arg([var])					\
	    : [var] "=m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * The generic per-CPU infrastructure is not suitable for
 * reading const-qualified variables.
 */
#define __raw_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })

#endif /* CONFIG_USE_X86_SEG_SUPPORT */
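
/*
 * Either way the access compiles to a single instruction. Illustrative
 * example, assuming a per-CPU "int foo" (hypothetical name) on a 64-bit
 * SMP build:
 *
 *	x = raw_cpu_read(foo);
 *
 * becomes "movl %gs:foo(%rip), %eax", either through the
 * named-address-space dereference (CONFIG_USE_X86_SEG_SUPPORT) or through
 * the explicit asm template above.
 */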

#define __raw_cpu_read_stable(size, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm(__pcpu_op_##size("mov")					\
	    __force_percpu_arg(a[var]) ", %[val]"			\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "i" (&(_var)));					\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op_##size(op) __percpu_arg([var])		\
	    : [var] "+m" (__my_cpu_var(_var)));				\
})

#define percpu_binary_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {							\
		TYPEOF_UNQUAL(_var) pto_tmp__;				\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual (__pcpu_op_##size(op) "%[val], " __percpu_arg([var])	\
	    : [var] "+m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * Generate a per-CPU add-to-memory instruction, optimizing the code to
 * use INC/DEC when the value being added is a constant 1 or -1.
 */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ =						\
		(__builtin_constant_p(val) &&				\
			((val) == 1 ||					\
			 (val) == (typeof(val))-1)) ? (int)(val) : 0;	\
									\
	if (0) {							\
		TYPEOF_UNQUAL(var) pao_tmp__;				\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_binary_op(size, qual, "add", var, val);		\
} while (0)
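
/*
 * An illustrative example of the above, assuming a per-CPU "int foo"
 * (hypothetical name): this_cpu_inc(foo) hits the pao_ID__ == 1 case and
 * emits "incl %gs:foo(%rip)", while this_cpu_add(foo, 2) falls through to
 * percpu_binary_op() and emits "addl $2, %gs:foo(%rip)".
 */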

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
									\
	asm qual (__pcpu_op_##size("xadd") "%[tmp], "			\
		  __percpu_arg([var])					\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval)					\
({									\
	TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var);		\
									\
	raw_cpu_write(_var, _nval);					\
									\
	pxo_old__;							\
})

/*
 * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op(_var, _nval)				\
({									\
	TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var);		\
									\
	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval));	\
									\
	pxo_old__;							\
})
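
/*
 * Note that this_cpu_try_cmpxchg() refreshes pxo_old__ with the current
 * value on failure, so the loop above converges even when an interrupt
 * modifies the variable between the read and the CMPXCHG.
 */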

/*
 * CMPXCHG has no such implied lock semantics; as a result, it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op_##size("cmpxchg") "%[nval], "		\
		  __percpu_arg([var])					\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
									\
	(typeof(_var))(unsigned long) pco_old__;			\
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)		\
({									\
	bool success;							\
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__;			\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op_##size("cmpxchg") "%[nval], "		\
		  __percpu_arg([var])					\
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	if (unlikely(!success))						\
		*pco_oval__ = pco_old__;				\
									\
	likely(success);						\
})
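
/*
 * A minimal usage sketch (hypothetical per-CPU "u32 foo" and helper
 * compute()): the try variant suits retry loops, because the expected
 * value is refreshed in place on failure:
 *
 *	u32 old = this_cpu_read(foo), new;
 *
 *	do {
 *		new = compute(old);
 *	} while (!this_cpu_try_cmpxchg(foo, &old, new));
 */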

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		: ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
									\
	old__.var;							\
})
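
/*
 * CMPXCHG8B compares EDX:EAX with the 64-bit memory operand and, on a
 * match, stores ECX:EBX there; the union above simply routes the two
 * 32-bit halves into those fixed register pairs.  CPUs without
 * X86_FEATURE_CX8 take the ALTERNATIVE's this_cpu_cmpxchg8b_emu call
 * instead, which receives the variable's address via the "S" (%esi)
 * constraint.
 */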

#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u64 *_oval = (u64 *)(_ovalp);					\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		CC_SET(z)						\
		: ALT_OUTPUT_SP(CC_OUT(z) (success),			\
				[var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
									\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg64_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		: ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16,         , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u128 *_oval = (u128 *)(_ovalp);					\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		CC_SET(z)						\
		: ALT_OUTPUT_SP(CC_OUT(z) (success),			\
				[var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
									\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)

#endif /* CONFIG_X86_64 */

#define raw_cpu_read_1(pcp)				__raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp)				__raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp)				__raw_cpu_read(4, , pcp)
#define raw_cpu_write_1(pcp, val)			__raw_cpu_write(1, , pcp, val)
#define raw_cpu_write_2(pcp, val)			__raw_cpu_write(2, , pcp, val)
#define raw_cpu_write_4(pcp, val)			__raw_cpu_write(4, , pcp, val)

#define this_cpu_read_1(pcp)				__raw_cpu_read(1, volatile, pcp)
#define this_cpu_read_2(pcp)				__raw_cpu_read(2, volatile, pcp)
#define this_cpu_read_4(pcp)				__raw_cpu_read(4, volatile, pcp)
#define this_cpu_write_1(pcp, val)			__raw_cpu_write(1, volatile, pcp, val)
#define this_cpu_write_2(pcp, val)			__raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val)			__raw_cpu_write(4, volatile, pcp, val)

#define this_cpu_read_stable_1(pcp)			__raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp)			__raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp)			__raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val)				percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)				percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)				percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)				percpu_binary_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)				percpu_binary_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)				percpu_binary_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)				percpu_binary_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)				percpu_binary_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)				percpu_binary_op(4, , "or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)			raw_percpu_xchg_op(pcp, val)

#define this_cpu_add_1(pcp, val)			percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)			percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)			percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)			percpu_binary_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)			percpu_binary_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)			percpu_binary_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)				percpu_binary_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)				percpu_binary_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)				percpu_binary_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)			this_percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)		percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)		percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)		percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)	percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)	percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8(pcp)				__raw_cpu_read(8, , pcp)
#define raw_cpu_write_8(pcp, val)			__raw_cpu_write(8, , pcp, val)

#define this_cpu_read_8(pcp)				__raw_cpu_read(8, volatile, pcp)
#define this_cpu_write_8(pcp, val)			__raw_cpu_write(8, volatile, pcp, val)

#define this_cpu_read_stable_8(pcp)			__raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val)				percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)				percpu_binary_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)				percpu_binary_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)			raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val)			percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)			percpu_binary_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)				percpu_binary_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp)				raw_cpu_read_8(pcp)

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8(pcp)			({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp)				raw_cpu_read_4(pcp)

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp)			__raw_cpu_read_const(pcp)

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs.  The current users include
 * current_task and cpu_current_top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp)			__pcpu_size_call_return(this_cpu_read_stable_, pcp)
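
/*
 * For example, reading current_task through
 *
 *	this_cpu_read_stable(current_task)
 *
 * lets the compiler cache the loaded pointer for the lifetime of the
 * enclosing function instead of re-reading %gs:current_task at every use
 * of "current".
 */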

#define x86_this_cpu_constant_test_bit(_nr, _var)			\
({									\
	unsigned long __percpu *addr__ =				\
		(unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
									\
	!!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__));	\
})

#define x86_this_cpu_variable_test_bit(_nr, _var)			\
({									\
	bool oldbit;							\
									\
	asm volatile("btl %[nr], " __percpu_arg([var])			\
		     CC_SET(c)						\
		     : CC_OUT(c) (oldbit)				\
		     : [var] "m" (__my_cpu_var(_var)),			\
		       [nr] "rI" (_nr));				\
	oldbit;								\
})

#define x86_this_cpu_test_bit(_nr, _var)				\
	(__builtin_constant_p(_nr)					\
	 ? x86_this_cpu_constant_test_bit(_nr, _var)			\
	 : x86_this_cpu_variable_test_bit(_nr, _var))
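
/*
 * Illustrative usage, assuming a per-CPU "unsigned long foo_flags"
 * (hypothetical name):
 *
 *	if (x86_this_cpu_test_bit(3, foo_flags))
 *		...
 *
 * A constant bit number reduces to a masked raw_cpu_read() of the
 * containing long; a variable one uses a single BT against the
 * %gs-based address.
 */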


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLER__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name);					\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);			\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name)			(_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx)			(_name##_early_map[_idx])

#define	early_per_cpu(_name, _cpu)					\
	*(early_per_cpu_ptr(_name) ?					\
		&early_per_cpu_ptr(_name)[_cpu] :			\
		&per_cpu(_name, _cpu))
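
/*
 * A usage sketch with a hypothetical early variable "foo_per_cpu": the
 * variable is defined once and accessed the same way before and after
 * setup_per_cpu_areas():
 *
 *	DEFINE_EARLY_PER_CPU(int, foo_per_cpu, -1);
 *	...
 *	early_per_cpu(foo_per_cpu, cpu) = value;
 *
 * Once the real per-CPU areas exist, the boot code copies the early map
 * into them and clears foo_per_cpu_early_ptr, after which early_per_cpu()
 * transparently resolves to per_cpu().
 */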

#else /* !CONFIG_SMP: */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu)			per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name)			NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */