xref: /src/sys/amd64/include/cpufunc.h (revision 89d7b30c652c98ea12abc5eb9424464cbfb45953)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2003 Peter Wemm.
5  * Copyright (c) 1993 The Regents of the University of California.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
36  * used in preference to this.
37  */
38 
39 #ifdef __i386__
40 #include <i386/cpufunc.h>
41 #else /* !__i386__ */
42 
43 #ifndef _MACHINE_CPUFUNC_H_
44 #define	_MACHINE_CPUFUNC_H_
45 
/* Forward declaration; the full definition lives in <machine/segments.h>. */
struct region_descriptor;

/*
 * Memory-mapped I/O read accessors: volatile loads of the given width
 * (byte/word/long/quad) from virtual address va.
 */
#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

/*
 * Memory-mapped I/O write accessors: volatile stores of value d, of the
 * given width, to virtual address va.
 */
#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
57 
/* Trap into the debugger via a software breakpoint (INT 3). */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

/*
 * Bit-scan-forward: index of the least significant set bit.
 * Result is undefined when mask == 0 (__builtin_ctz contract).
 */
#define	bsfl(mask)	__builtin_ctz(mask)

/* 64-bit variant of bsfl(); mask must be nonzero. */
#define	bsfq(mask)	__builtin_ctzl(mask)
67 
/* Flush (write back and invalidate) the cache line containing addr. */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

/* Optimized (weakly ordered) cache-line flush of the line containing addr. */
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile("clflushopt %0" : : "m" (*(char *)addr));
}

/* Write back (without invalidating) the cache line containing addr. */
static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

/* Clear the task-switched flag (CR0.TS). */
static __inline void
clts(void)
{

	__asm __volatile("clts");
}

/*
 * Disable maskable interrupts (CLI).  The "memory" clobber keeps the
 * compiler from moving memory accesses across the critical-section edge.
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}
101 
/*
 * Execute CPUID with leaf ax and subleaf cx; store the results in
 * p[0..3] = %eax, %ebx, %ecx, %edx respectively.
 */
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}

/* CPUID with subleaf 0; see cpuid_count() for the output layout in p. */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	cpuid_count(ax, 0, p);
}

/* Enable maskable interrupts (STI). */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

/* Halt the CPU until the next interrupt (HLT). */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}
127 
/*
 * Read one byte from I/O port.  The "Nd" constraint lets the port be an
 * immediate (0-255) or be passed in %dx.
 */
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read a 32-bit longword from I/O port. */
static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/*
 * String input: read count bytes from I/O port into the buffer at addr.
 * "+D"/"+c" reflect that REP INSB advances %rdi and decrements %rcx.
 */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* String input of count 16-bit words from I/O port into addr. */
static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* String input of count 32-bit longwords from I/O port into addr. */
static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* Invalidate all caches WITHOUT writing back dirty lines (INVD). */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/* Read a 16-bit word from I/O port. */
static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
187 
/* Write one byte to I/O port. */
static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write a 32-bit longword to I/O port. */
static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/*
 * String output: write count bytes from addr to I/O port.
 * "+S"/"+c" reflect that REP OUTSB advances %rsi and decrements %rcx.
 */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* String output of count 16-bit words from addr to I/O port. */
static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* String output of count 32-bit longwords from addr to I/O port. */
static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* Write a 16-bit word to I/O port. */
static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
229 
230 static __inline u_long
popcntq(u_long mask)231 popcntq(u_long mask)
232 {
233 	u_long result;
234 
235 	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
236 	return (result);
237 }
238 
/* Load fence: serializes load operations; compiler barrier via "memory". */
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

/* Full memory fence: serializes loads and stores; also a compiler barrier. */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

/* Store fence: serializes store operations; also a compiler barrier. */
static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

/* PAUSE hint for spin-wait loops. */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}
265 
/* Return the current %rflags register (PUSHFQ/POPQ). */
static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

/*
 * Read the 64-bit model-specific register msr.  RDMSR returns the value
 * split across %edx:%eax; reassemble it here.
 */
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}
283 
/* Read only the low 32 bits of MSR msr; %rdx (high half) is discarded. */
static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

/* Read performance-monitoring counter pmc (RDPMC), %edx:%eax reassembled. */
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

/* Read the time-stamp counter (RDTSC), %edx:%eax reassembled. */
static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

/* RDTSC preceded by LFENCE to order it against earlier instructions. */
static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

/* RDTSC preceded by MFENCE to order it against earlier instructions. */
static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

/*
 * Read the TSC via RDTSCP.  RDTSCP also writes TSC_AUX to %ecx, which
 * is discarded here (hence the "ecx" clobber).
 */
static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

/* RDTSCP variant that also returns the TSC_AUX value through *aux. */
static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

/* Read only the low 32 bits of the TSC; %edx is discarded. */
static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

/* Low 32 bits of the TSC via RDTSCP; %ecx and %edx are discarded. */
static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}
360 
/* Write back dirty cache lines and invalidate all caches (WBINVD). */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

/* Load %rflags from rf (PUSHQ/POPFQ). */
static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

/* Write newval to MSR msr; WRMSR takes the value split across %edx:%eax. */
static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
382 
/* Load control register CR0. */
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

/* Read control register CR0. */
static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

/* Read CR2 (page-fault linear address). */
static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

/*
 * Load CR3 (page-table base).  The "memory" clobber keeps the compiler
 * from reordering memory accesses around the address-space switch.
 */
static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

/* Read CR3 (page-table base). */
static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

/* Load control register CR4. */
static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

/* Read control register CR4. */
static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

/* Read extended control register reg (XGETBV), %edx:%eax reassembled. */
static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

/* Write val to extended control register reg (XSETBV). */
static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}
457 
/*
 * Global TLB flush (except for those for pages marked PG_G), done by
 * reloading CR3 with its current value.
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif
471 
/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	/* Toggling CR4.PGE off and back on flushes the entire TLB. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}
495 
/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
506 
/* INVPCID invalidation types (second operand of the instruction). */
#define	INVPCID_ADDR	0	/* single address, given PCID */
#define	INVPCID_CTX	1	/* single PCID context */
#define	INVPCID_CTXGLOB	2	/* all contexts incl. globals */
#define	INVPCID_ALLCTX	3	/* all contexts except globals */

/* In-memory descriptor operand for INVPCID: PCID plus linear address. */
struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

/* Execute INVPCID with descriptor d and one of the INVPCID_* types. */
static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
525 
/* Flag bits for the %rax operand of the INVLPGB instruction. */
#define	INVLPGB_VA		0x0001
#define	INVLPGB_PCID		0x0002
#define	INVLPGB_ASID		0x0004
#define	INVLPGB_GLOB		0x0008
#define	INVLPGB_FIN		0x0010
#define	INVLPGB_NEST		0x0020

/* Build the %edx operand: ASID in the low 16 bits, PCID above it. */
#define	INVLPGB_DESCR(asid, pcid)	(((pcid) << 16) | (asid))

#define	INVLPGB_2M_CNT		(1u << 31)

/* Broadcast TLB invalidation (AMD INVLPGB); operands in %rax/%edx/%ecx. */
static __inline void
invlpgb(uint64_t rax, uint32_t edx, uint32_t ecx)
{
	__asm __volatile("invlpgb" : : "a" (rax), "d" (edx), "c" (ecx));
}

/* Wait for completion of preceding INVLPGB broadcasts (TLBSYNC). */
static __inline void
tlbsync(void)
{
	__asm __volatile("tlbsync");
}
548 
/* Return the %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %cs segment selector. */
static __inline u_short
rcs(void)
{
	u_short sel;

	__asm __volatile("movw %%cs,%0" : "=rm" (sel));
	return (sel);
}

/* Load selector sel into %ds. */
static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

/* Load selector sel into %es. */
static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}
593 
/* Arm address monitoring on addr (MONITOR); pairs with cpu_mwait(). */
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/* Wait for a store to the monitored address (MWAIT). */
static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : : "a" (hints), "c" (extensions));
}

/* Read the protection-key rights register (RDPKRU; %ecx must be 0). */
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

/* Write mask to the protection-key rights register (WRPKRU). */
static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}
624 
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
/* Kernel variant: load %fs without losing the current FSBASE MSR value. */
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
/* Kernel variant: load %gs without losing the current GSBASE MSR value. */
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
666 
/* Read the FS segment base via the RDFSBASE instruction. */
static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

/* Write the FS segment base via the WRFSBASE instruction. */
static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

/* Read the GS segment base via the RDGSBASE instruction. */
static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

/* Write the GS segment base via the WRGSBASE instruction. */
static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

/* Load the GDT register from the descriptor at addr (LGDT). */
static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}
704 
/* Store the GDT register into *addr (SGDT). */
static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

/* Load the IDT register from the descriptor at addr (LIDT). */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* Store the IDT register into *addr (SIDT). */
static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

/* Load the LDT register with selector sel (LLDT). */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* Return the current LDT selector (SLDT). */
static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

/* Load the task register with selector sel (LTR). */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}
749 
/* Return the task register selector (STR), widened to uint32_t. */
static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}
758 
/*
 * Accessors for the x86 debug registers: DR0-DR3 hold breakpoint
 * addresses, DR6 is debug status, DR7 is debug control.
 */

/* Read debug register DR0. */
static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

/* Write debug register DR0. */
static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

/* Read debug register DR1. */
static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

/* Write debug register DR1. */
static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

/* Read debug register DR2. */
static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

/* Write debug register DR2. */
static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

/* Read debug register DR3. */
static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

/* Write debug register DR3. */
static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

/* Read debug status register DR6. */
static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

/* Write debug status register DR6. */
static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

/* Read debug control register DR7. */
static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

/* Write debug control register DR7. */
static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
842 
/*
 * Disable interrupts, returning the previous %rflags so the caller can
 * restore the prior interrupt state with intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

/* Restore %rflags (and thus the interrupt state) saved by intr_disable(). */
static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

/* Set EFLAGS.AC to permit supervisor access to user pages (SMAP, STAC). */
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

/* Clear EFLAGS.AC to re-arm SMAP protection (CLAC). */
static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}
872 
/* ENCLS leaf function numbers (passed in %eax to sgx_encls()). */
enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

/* SGX enclave page types. */
enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

/* Execute an ENCLS leaf; implemented out of line.  Returns a status code. */
int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);
897 
/* ECREATE: create an SGX enclave from pginfo into the SECS page secs. */
static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

/* EADD: add the EPC page epc to an enclave, described by pginfo. */
static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

/* EINIT: finalize enclave initialization with sigstruct and einittoken. */
static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

/* EEXTEND: extend the enclave measurement with the contents of epc. */
static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

/* EPA: allocate epc as a version-array (SGX_PT_VA) page. */
static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

/* ELDU: load an evicted, unblocked page back into the EPC. */
static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

/* EREMOVE: remove the EPC page epc from its enclave. */
static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}
951 
/*
 * Restore the extended processor states selected by state_bitmap from
 * save_area (XRSTORS; the bitmap is passed in %edx:%eax).
 */
static __inline void
xrstors(uint8_t *save_area, uint64_t state_bitmap)
{
	uint32_t low, hi;

	low = state_bitmap;
	hi = state_bitmap >> 32;
	__asm __volatile("xrstors %0" : : "m"(*save_area), "a"(low),
	    "d"(hi));
}

/*
 * Save the extended processor states selected by state_bitmap into
 * save_area (XSAVES; the bitmap is passed in %edx:%eax).
 */
static __inline void
xsaves(uint8_t *save_area, uint64_t state_bitmap)
{
	uint32_t low, hi;

	low = state_bitmap;
	hi = state_bitmap >> 32;
	__asm __volatile("xsaves %0" : "=m"(*save_area) : "a"(low),
	    "d"(hi)
	    : "memory");
}

void	reset_dbregs(void);

#ifdef _KERNEL
/* Fault-tolerant MSR access; return non-zero on #GP instead of panicking. */
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif
981 
982 #endif /* !_MACHINE_CPUFUNC_H_ */
983 
984 #endif /* __i386__ */
985