#ifndef LIBCFLAT_PROCESSOR_H
#define LIBCFLAT_PROCESSOR_H

#include "libcflat.h"
#include "msr.h"
#include <stdint.h>

/*
 * Register-name prefix ("e"/"r"), operand-width suffix ("l"/"q") and
 * word size in bytes, for word-size-generic inline asm.
 */
#ifdef __x86_64__
# define R "r"
# define W "q"
# define S "8"
#else
# define R "e"
# define W "l"
# define S "4"
#endif

/* Exception vectors */
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define AC_VECTOR 17

#define X86_CR0_PE 0x00000001
#define X86_CR0_MP 0x00000002
#define X86_CR0_EM 0x00000004
#define X86_CR0_TS 0x00000008
#define X86_CR0_WP 0x00010000
#define X86_CR0_AM 0x00040000
#define X86_CR0_PG 0x80000000
#define X86_CR3_PCID_MASK 0x00000fff
#define X86_CR4_TSD 0x00000004
#define X86_CR4_DE 0x00000008
#define X86_CR4_PSE 0x00000010
#define X86_CR4_PAE 0x00000020
#define X86_CR4_MCE 0x00000040
#define X86_CR4_PGE 0x00000080
#define X86_CR4_PCE 0x00000100
#define X86_CR4_UMIP 0x00000800
#define X86_CR4_VMXE 0x00002000
#define X86_CR4_PCIDE 0x00020000
#define X86_CR4_SMEP 0x00100000
#define X86_CR4_SMAP 0x00200000
#define X86_CR4_PKE 0x00400000

#define X86_EFLAGS_CF 0x00000001
#define X86_EFLAGS_FIXED 0x00000002
#define X86_EFLAGS_PF 0x00000004
#define X86_EFLAGS_AF 0x00000010
#define X86_EFLAGS_ZF 0x00000040
#define X86_EFLAGS_SF 0x00000080
#define X86_EFLAGS_TF 0x00000100
#define X86_EFLAGS_IF 0x00000200
#define X86_EFLAGS_DF 0x00000400
#define X86_EFLAGS_OF 0x00000800
#define X86_EFLAGS_IOPL 0x00003000
#define X86_EFLAGS_NT 0x00004000
#define X86_EFLAGS_AC 0x00040000

#define X86_EFLAGS_ALU (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
			X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

#define X86_IA32_EFER 0xc0000080
#define X86_EFER_LMA (1UL << 10) /* EFER.LMA: long mode active (bit 8 is LME) */

/*
 * CPU features
 */

enum cpuid_output_regs {
	EAX,
	EBX,
	ECX,
	EDX
};

struct cpuid { u32 a, b, c, d; };

static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
	struct cpuid r;
	asm volatile ("cpuid"
		      : "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
		      : "0"(function), "2"(index));
	return r;
}

static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
	/* Return zeroes if the leaf is above the range's maximum level. */
	u32 level = raw_cpuid(function & 0xf0000000, 0).a;
	if (level < function)
		return (struct cpuid) { 0, 0, 0, 0 };
	return raw_cpuid(function, index);
}

static inline struct cpuid cpuid(u32 function)
{
	return cpuid_indexed(function, 0);
}

static inline u8 cpuid_maxphyaddr(void)
{
	if (raw_cpuid(0x80000000, 0).a < 0x80000008)
		return 36;
	return raw_cpuid(0x80000008, 0).a & 0xff;
}

#define CPUID(a, b, c, d) ((((unsigned long long) a) << 32) | (b << 16) | \
			   (c << 8) | d)

/*
 * Each X86_FEATURE_XXX definition is 64-bit and contains the following
 * CPUID meta-data:
 *
 *	[63:32] : input value for EAX
 *	[31:16] : input value for ECX
 *	[15:8]  : output register
 *	[7:0]   : bit position in output register
 */
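
/*
 * For example (editor's illustration), X86_FEATURE_PCID below encodes
 * CPUID(0x1, 0, ECX, 17): execute CPUID with EAX = 0x1 and ECX = 0, then
 * test bit 17 of the ECX output.  Open-coded with the helpers above, the
 * equivalent check is:
 *
 *	struct cpuid c = cpuid_indexed(0x1, 0);
 *	bool has_pcid = c.c & (1u << 17);
 *
 * this_cpu_has(), defined after the feature lists, performs exactly this
 * decoding for any X86_FEATURE_XXX value.
 */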
/*
 * Intel CPUID features
 */
#define X86_FEATURE_MWAIT		(CPUID(0x1, 0, ECX, 3))
#define X86_FEATURE_VMX			(CPUID(0x1, 0, ECX, 5))
#define X86_FEATURE_PCID		(CPUID(0x1, 0, ECX, 17))
#define X86_FEATURE_MOVBE		(CPUID(0x1, 0, ECX, 22))
#define X86_FEATURE_TSC_DEADLINE_TIMER	(CPUID(0x1, 0, ECX, 24))
#define X86_FEATURE_XSAVE		(CPUID(0x1, 0, ECX, 26))
#define X86_FEATURE_OSXSAVE		(CPUID(0x1, 0, ECX, 27))
#define X86_FEATURE_RDRAND		(CPUID(0x1, 0, ECX, 30))
#define X86_FEATURE_MCE			(CPUID(0x1, 0, EDX, 7))
#define X86_FEATURE_APIC		(CPUID(0x1, 0, EDX, 9))
#define X86_FEATURE_CLFLUSH		(CPUID(0x1, 0, EDX, 19))
#define X86_FEATURE_XMM			(CPUID(0x1, 0, EDX, 25))
#define X86_FEATURE_XMM2		(CPUID(0x1, 0, EDX, 26))
#define X86_FEATURE_TSC_ADJUST		(CPUID(0x7, 0, EBX, 1))
#define X86_FEATURE_INVPCID_SINGLE	(CPUID(0x7, 0, EBX, 7))
#define X86_FEATURE_INVPCID		(CPUID(0x7, 0, EBX, 10))
#define X86_FEATURE_RTM			(CPUID(0x7, 0, EBX, 11))
#define X86_FEATURE_SMAP		(CPUID(0x7, 0, EBX, 20))
#define X86_FEATURE_PCOMMIT		(CPUID(0x7, 0, EBX, 22))
#define X86_FEATURE_CLFLUSHOPT		(CPUID(0x7, 0, EBX, 23))
#define X86_FEATURE_CLWB		(CPUID(0x7, 0, EBX, 24))
#define X86_FEATURE_UMIP		(CPUID(0x7, 0, ECX, 2))
#define X86_FEATURE_PKU			(CPUID(0x7, 0, ECX, 3))
#define X86_FEATURE_LA57		(CPUID(0x7, 0, ECX, 16))
#define X86_FEATURE_RDPID		(CPUID(0x7, 0, ECX, 22))
#define X86_FEATURE_SPEC_CTRL		(CPUID(0x7, 0, EDX, 26))
#define X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
#define X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))

/*
 * AMD CPUID features
 */
#define X86_FEATURE_SVM			(CPUID(0x80000001, 0, ECX, 2))
#define X86_FEATURE_RDTSCP		(CPUID(0x80000001, 0, EDX, 27))
#define X86_FEATURE_AMD_IBPB		(CPUID(0x80000008, 0, EBX, 12))
#define X86_FEATURE_NPT			(CPUID(0x8000000A, 0, EDX, 0))
#define X86_FEATURE_NRIPS		(CPUID(0x8000000A, 0, EDX, 3))

static inline bool this_cpu_has(u64 feature)
{
	u32 input_eax = feature >> 32;
	u32 input_ecx = (feature >> 16) & 0xffff;
	u32 output_reg = (feature >> 8) & 0xff;
	u8 bit = feature & 0xff;
	struct cpuid c;
	u32 *tmp;

	c = cpuid_indexed(input_eax, input_ecx);

	/* struct cpuid's a/b/c/d members match enum cpuid_output_regs. */
	tmp = (u32 *)&c;
	return tmp[output_reg] & (1u << bit);
}
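
/*
 * Usage sketch (editor's addition): feature checks gate optional
 * instructions, e.g. only execute RDTSCP (see rdtscp() below) when the
 * CPU reports it:
 *
 *	u32 aux;
 *	if (this_cpu_has(X86_FEATURE_RDTSCP))
 *		rdtscp(&aux);
 */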
struct far_pointer32 {
	u32 offset;
	u16 selector;
} __attribute__((packed));

struct descriptor_table_ptr {
	u16 limit;
	ulong base;
} __attribute__((packed));

static inline void barrier(void)
{
	asm volatile ("" : : : "memory");
}

static inline void clac(void)
{
	/* CLAC, emitted as raw bytes for assemblers without SMAP support */
	asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}

static inline void stac(void)
{
	/* STAC, emitted as raw bytes for assemblers without SMAP support */
	asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}

static inline u16 read_cs(void)
{
	unsigned val;

	asm volatile ("mov %%cs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ds(void)
{
	unsigned val;

	asm volatile ("mov %%ds, %0" : "=mr"(val));
	return val;
}

static inline u16 read_es(void)
{
	unsigned val;

	asm volatile ("mov %%es, %0" : "=mr"(val));
	return val;
}

static inline u16 read_ss(void)
{
	unsigned val;

	asm volatile ("mov %%ss, %0" : "=mr"(val));
	return val;
}

static inline u16 read_fs(void)
{
	unsigned val;

	asm volatile ("mov %%fs, %0" : "=mr"(val));
	return val;
}

static inline u16 read_gs(void)
{
	unsigned val;

	asm volatile ("mov %%gs, %0" : "=mr"(val));
	return val;
}

static inline unsigned long read_rflags(void)
{
	unsigned long f;
	asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
	return f;
}

static inline void write_ds(unsigned val)
{
	asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}

static inline void write_es(unsigned val)
{
	asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}

static inline void write_ss(unsigned val)
{
	asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}

static inline void write_fs(unsigned val)
{
	asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}

static inline void write_gs(unsigned val)
{
	asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}

static inline void write_rflags(unsigned long f)
{
	asm volatile ("push %0; popf\n\t" : : "rm"(f));
}

static inline void set_iopl(int iopl)
{
	/* X86_EFLAGS_IOPL / 3 is the value of the field's low bit. */
	unsigned long flags = read_rflags() & ~X86_EFLAGS_IOPL;
	flags |= iopl * (X86_EFLAGS_IOPL / 3);
	write_rflags(flags);
}

static inline u64 rdmsr(u32 index)
{
	u32 a, d;
	asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
	return a | ((u64)d << 32);
}

static inline void wrmsr(u32 index, u64 val)
{
	u32 a = val, d = val >> 32;
	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}
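
/*
 * Usage sketch (editor's addition): MSRs move through EDX:EAX as two
 * 32-bit halves, which rdmsr()/wrmsr() reassemble into a u64.  For
 * example, long mode is active when the LMA bit is set in IA32_EFER:
 *
 *	bool in_long_mode = rdmsr(X86_IA32_EFER) & X86_EFER_LMA;
 */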
static inline uint64_t rdpmc(uint32_t index)
{
	uint32_t a, d;
	asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
	return a | ((uint64_t)d << 32);
}

static inline void write_cr0(ulong val)
{
	asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
}

static inline ulong read_cr0(void)
{
	ulong val;
	asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr2(ulong val)
{
	asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}

static inline ulong read_cr2(void)
{
	ulong val;
	asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr3(ulong val)
{
	asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
}

static inline ulong read_cr3(void)
{
	ulong val;
	asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr4(ulong val)
{
	asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
}

static inline ulong read_cr4(void)
{
	ulong val;
	asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void write_cr8(ulong val)
{
	asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}

static inline ulong read_cr8(void)
{
	ulong val;
	asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
	return val;
}

static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lgdt %0" : : "m"(*ptr));
}

static inline void sgdt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sgdt %0" : "=m"(*ptr));
}

static inline void lidt(const struct descriptor_table_ptr *ptr)
{
	asm volatile ("lidt %0" : : "m"(*ptr));
}

static inline void sidt(struct descriptor_table_ptr *ptr)
{
	asm volatile ("sidt %0" : "=m"(*ptr));
}

static inline void lldt(unsigned val)
{
	asm volatile ("lldt %0" : : "rm"(val));
}

static inline u16 sldt(void)
{
	u16 val;
	asm volatile ("sldt %0" : "=rm"(val));
	return val;
}

static inline void ltr(u16 val)
{
	asm volatile ("ltr %0" : : "rm"(val));
}

static inline u16 str(void)
{
	u16 val;
	asm volatile ("str %0" : "=rm"(val));
	return val;
}

static inline void write_dr6(ulong val)
{
	asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}

static inline ulong read_dr6(void)
{
	ulong val;
	asm volatile ("mov %%dr6, %0" : "=r"(val));
	return val;
}

static inline void write_dr7(ulong val)
{
	asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}

static inline ulong read_dr7(void)
{
	ulong val;
	asm volatile ("mov %%dr7, %0" : "=r"(val));
	return val;
}

static inline void pause(void)
{
	asm volatile ("pause");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

static inline void sti(void)
{
	asm volatile ("sti");
}

static inline unsigned long long rdtsc(void)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtsc" : "=a"(a), "=d"(d));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtsc" : "=A"(r));
#endif
	return r;
}

static inline unsigned long long rdtscp(u32 *aux)
{
	long long r;

#ifdef __x86_64__
	unsigned a, d;

	asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
	r = a | ((long long)d << 32);
#else
	asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
	return r;
}

static inline void wrtsc(u64 tsc)
{
	unsigned a = tsc, d = tsc >> 32;

	/* 0x10 is the IA32_TIME_STAMP_COUNTER MSR */
	asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
}

static inline void irq_disable(void)
{
	asm volatile("cli");
}

/*
 * Note that irq_enable() does not ensure an interrupt shadow due
 * to the vagaries of compiler optimizations.  If you need the
 * shadow, use a single asm with "sti" and the instruction after it.
 */
static inline void irq_enable(void)
{
	asm volatile("sti");
}

static inline void invlpg(volatile void *va)
{
	asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}

static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}

static inline u32 read_pkru(void)
{
	unsigned int eax, edx;
	unsigned int ecx = 0;
	unsigned int pkru;

	/* RDPKRU, emitted as raw bytes for assemblers without PKU support */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (eax), "=d" (edx)
		     : "c" (ecx));
	pkru = eax;
	return pkru;
}

static inline void write_pkru(u32 pkru)
{
	unsigned int eax = pkru;
	unsigned int ecx = 0;
	unsigned int edx = 0;

	/* WRPKRU, emitted as raw bytes for assemblers without PKU support */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

/* True if addr sign-extends cleanly from bit 47, i.e. is 48-bit canonical. */
static inline bool is_canonical(u64 addr)
{
	return (s64)(addr << 16) >> 16 == addr;
}

static inline void clear_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("btr %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

static inline void set_bit(int bit, u8 *addr)
{
	__asm__ __volatile__("bts %1, %0"
			     : "+m" (*addr) : "Ir" (bit) : "cc", "memory");
}

/* Toggling CR4.PGE flushes all TLB entries, including global ones. */
static inline void flush_tlb(void)
{
	ulong cr4;

	cr4 = read_cr4();
	write_cr4(cr4 ^ X86_CR4_PGE);
	write_cr4(cr4);
}

static inline int has_spec_ctrl(void)
{
	return !!(this_cpu_has(X86_FEATURE_SPEC_CTRL));
}

static inline int cpu_has_efer_nx(void)
{
	return !!(this_cpu_has(X86_FEATURE_NX));
}

#endif