/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLER__
#include <linux/types.h>

#include <asm/paravirt-base.h>
#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct thread_struct;
struct mm_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct vm_area_struct;

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(u32 msr);
	void (*write_msr)(u32 msr, u64 val);

	/*
	 * Safe MSR operations.
	 * Returns 0 or -EIO.
	 */
	int (*read_msr_safe)(u32 msr, u64 *val);
	int (*write_msr_safe)(u32 msr, u64 val);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;
#endif
	void (*safe_halt)(void);
	void (*halt)(void);
} __no_randomize_layout;

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hook for intercepting the creation/use of an mm_struct. */
	void (*enter_mmap)(struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch.
 */
struct paravirt_patch_template {
	struct pv_cpu_ops cpu;
	struct pv_irq_ops irq;
	struct pv_mmu_ops mmu;
} __no_randomize_layout;

extern struct paravirt_patch_template pv_ops;

/* "m" operand naming the selected op slot inside the ops array. */
#define paravirt_ptr(array, op)	[paravirt_opptr] "m" (array.op)

/*
 * This generates an indirect call based on the operation type number.
 *
 * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
 * capabilities and the former requiring all capabilities being finalized --
 * these indirect calls are subject to IBT and the paravirt stubs should have
 * ENDBR on.
 *
 * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
 * don't need to bother with CFI prefixes.
 */
#define PARAVIRT_CALL						\
	ANNOTATE_RETPOLINE_SAFE "\n\t"				\
	"call *%[paravirt_opptr]"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller saved registers, which
 * unfortunately, are quite a bit (r8 - r11)
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments
 * in low,high order
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
/* Self-initialization silences "maybe uninitialized" warnings for unused arg regs. */
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers.  Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), PVOP_VCALL_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#endif
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

/* Truncate the raw [re]ax value to the width of the requested return type. */
#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})

/*
 * Use alternative patching for paravirt calls:
 * - For replacing an indirect call with a direct one, use the "normal"
 *   ALTERNATIVE() macro with the indirect call as the initial code sequence,
 *   which will be replaced with the related direct call by using the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 * - In case the replacement is either a direct call or a short code sequence
 *   depending on a feature bit, the ALTERNATIVE_2() macro is being used.
 *   The indirect call is the initial code sequence again, while the special
 *   code sequence is selected with the specified feature bit.  In case the
 *   feature is not active, the direct call is used as above via the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 */
#define ____PVOP_CALL(ret, array, op, call_clbr, extra_clbr, ...)	\
	({								\
		PVOP_CALL_ARGS;						\
		asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR,	\
				ALT_CALL_ALWAYS)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(array, op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, array, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		asm volatile(ALTERNATIVE_2(PARAVIRT_CALL,		\
				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
				 alt, cond)				\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(array, op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, array, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), array, op,			\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, array, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), array, op, alt, cond,	\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, array, op, ...)			\
	____PVOP_CALL(PVOP_RETVAL(rettype), array, op.func,		\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, array, op, alt, cond, ...)	\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), array, op.func, alt, cond, \
			  PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(array, op, ...)					\
	(void)____PVOP_CALL(, array, op, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(array, op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, array, op, alt, cond,			\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(array, op, ...)				\
	(void)____PVOP_CALL(, array, op.func,				\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(array, op, alt, cond, ...)		\
	(void)____PVOP_ALT_CALL(, array, op.func, alt, cond,		\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, array, op)					\
	__PVOP_CALL(rettype, array, op)
#define PVOP_VCALL0(array, op)						\
	__PVOP_VCALL(array, op)
#define PVOP_ALT_CALL0(rettype, array, op, alt, cond)			\
	__PVOP_ALT_CALL(rettype, array, op, alt, cond)
#define PVOP_ALT_VCALL0(array, op, alt, cond)				\
	__PVOP_ALT_VCALL(array, op, alt, cond)

#define PVOP_CALLEE0(rettype, array, op)				\
	__PVOP_CALLEESAVE(rettype, array, op)
#define PVOP_VCALLEE0(array, op)					\
	__PVOP_VCALLEESAVE(array, op)
#define PVOP_ALT_CALLEE0(rettype, array, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, array, op, alt, cond)
#define PVOP_ALT_VCALLEE0(array, op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(array, op, alt, cond)


#define PVOP_CALL1(rettype, array, op, arg1)				\
	__PVOP_CALL(rettype, array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(array, op, arg1)					\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(array, op, arg1, alt, cond)			\
	__PVOP_ALT_VCALL(array, op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, array, op, arg1)				\
	__PVOP_CALLEESAVE(rettype, array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(array, op, arg1)					\
	__PVOP_VCALLEESAVE(array, op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, array, op, arg1, alt, cond)		\
	__PVOP_ALT_CALLEESAVE(rettype, array, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(array, op, arg1, alt, cond)			\
	__PVOP_ALT_VCALLEESAVE(array, op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, array, op, arg1, arg2)			\
	__PVOP_CALL(rettype, array, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(array, op, arg1, arg2)				\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, array, op, arg1, arg2, arg3)		\
	__PVOP_CALL(rettype, array, op, PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(array, op, arg1, arg2, arg3)			\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, array, op, arg1, arg2, arg3, arg4)		\
	__PVOP_CALL(rettype, array, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(array, op, arg1, arg2, arg3, arg4)			\
	__PVOP_VCALL(array, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))

#endif	/* __ASSEMBLER__ */

#define ALT_NOT_XEN	ALT_NOT(X86_FEATURE_XENPV)

#ifdef CONFIG_X86_32
/*
 * save and restore all caller-save registers, except return value
 * (on 32-bit, %eax/%edx may carry a 64-bit return in %edx:%eax,
 * so only %ecx is saved)
 */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    ASM_FUNC_ALIGN						\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)			\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

#endif	/* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */