/*
 * Memory Protection Keys for Userspace (PKU) test.
 *
 * Exercises the interaction between the PKRU access-disable (AD) and
 * write-disable (WD) bits, CR0.WP, and supervisor accesses to
 * supervisor vs. user pages, checking the number of #PF faults taken
 * in each configuration.
 */
#include "libcflat.h"
#include "x86/desc.h"
#include "x86/processor.h"
#include "x86/vm.h"
#include "x86/msr.h"

/* PKU feature bit in CPUID.(EAX=7,ECX=0):ECX, tested in main(). */
#define X86_FEATURE_PKU 3
/* CR0.WP: supervisor write-protect enable. */
#define CR0_WP_MASK (1UL << 16)
/* Lowest bit of the PTE protection-key field (key occupies bits 62:59). */
#define PTE_PKEY_BIT 59
/*
 * Split point of the address space: [0, USER_BASE) is made
 * supervisor-only, and [USER_BASE, 2*USER_BASE) is turned into a
 * user-accessible alias of the same low physical memory (see the PTE
 * setup loops in main()).
 */
#define USER_BASE (1 << 24)
/* Access variable v through its user-page alias at (&v + USER_BASE). */
#define USER_VAR(v) (*((__typeof__(&(v))) (((unsigned long)&v) + USER_BASE)))

volatile int pf_count = 0;   /* #PF faults taken since the last init_test() */
volatile unsigned save;      /* snapshot of 'test' taken inside the #PF handler */
volatile unsigned test;      /* variable the test cases read/write */

/* Set (wp != 0) or clear (wp == 0) CR0.WP. */
void set_cr0_wp(int wp)
{
	unsigned long cr0 = read_cr0();

	cr0 &= ~CR0_WP_MASK;
	if (wp)
		cr0 |= CR0_WP_MASK;
	write_cr0(cr0);
}

/* Read PKRU; 0f 01 ee is the RDPKRU opcode (hand-assembled, ECX must be 0). */
static inline u32 read_pkru(void)
{
	unsigned int eax, edx;
	unsigned int ecx = 0;
	unsigned int pkru;

	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (eax), "=d" (edx)
		     : "c" (ecx));
	pkru = eax;
	return pkru;
}

/* Write PKRU; 0f 01 ef is the WRPKRU opcode (ECX and EDX must be 0). */
static void write_pkru(u32 pkru)
{
	unsigned int eax = pkru;
	unsigned int ecx = 0;
	unsigned int edx = 0;

	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

/*
 * #PF handler body: count the fault, snapshot 'test' (so the test cases
 * can observe its value from before the faulting write retires), and
 * clear PKRU so that the faulting access succeeds when the stub returns
 * and the instruction is re-executed.
 */
void do_pf_tss(unsigned long error_code)
{
	pf_count++;
	save = test;
	write_pkru(0);
}

extern void pf_tss(void);

/*
 * #PF entry stub installed via set_intr_alt_stack(14, ...).
 * NOTE(review): S, R and W appear to be word-size / register-prefix /
 * iret-width macros supplied by the x86 headers — confirm in x86/desc.h.
 */
asm ("pf_tss: \n\t"
#ifdef __x86_64__
	// no task on x86_64, save/restore caller-save regs
	"push %rax; push %rcx; push %rdx; push %rsi; push %rdi\n"
	"push %r8; push %r9; push %r10; push %r11\n"
#endif
	"call do_pf_tss \n\t"
#ifdef __x86_64__
	"pop %r11; pop %r10; pop %r9; pop %r8\n"
	"pop %rdi; pop %rsi; pop %rdx; pop %rcx; pop %rax\n"
#endif
	"add $"S", %"R "sp\n\t" // discard error code
	"iret"W" \n\t"
	"jmp pf_tss\n\t"
	);

/*
 * Reset per-case state: zero the fault counter, flush the TLB entries
 * for both mappings of 'test', clear PKRU, and clear CR0.WP.
 */
static void init_test()
{
	pf_count = 0;

	invlpg(&test);
	invlpg(&USER_VAR(test));
	write_pkru(0);
	set_cr0_wp(0);
}

int main(int ac, char **av)
{
	unsigned long i;
	unsigned int pkey = 0x2;     /* protection key tagged into the PTEs */
	unsigned int pkru_ad = 0x10; /* PKRU value with AD set for that key */
	unsigned int pkru_wd = 0x20; /* PKRU value with WD set for that key */

	if (!(cpuid_indexed(7, 0).c & (1 << X86_FEATURE_PKU))) {
		printf("PKU not enabled, aborting\n");
		abort();
	}

	setup_vm();
	setup_alt_stack();
	set_intr_alt_stack(14, pf_tss);
	/* NOTE(review): ORs EFER.LMA in explicitly; assumed part of the
	 * required setup here — confirm against x86/msr.h users. */
	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_LMA);

	/* Make [0, USER_BASE) supervisor-only and tag every page with the
	 * test protection key. */
	for (i = 0; i < USER_BASE; i += PAGE_SIZE) {
		*get_pte(phys_to_virt(read_cr3()), phys_to_virt(i)) &= ~PTE_USER;
		*get_pte(phys_to_virt(read_cr3()), phys_to_virt(i)) |= ((unsigned long)pkey << PTE_PKEY_BIT);
		invlpg((void *)i);
	}

	/* Turn [USER_BASE, 2*USER_BASE) into an alias of low memory:
	 * clearing the USER_BASE bit in each PTE redirects the frame
	 * address back into [0, USER_BASE). PTE_USER is deliberately left
	 * set here, so these are the "user pages" used by USER_VAR().
	 * Tag them with the same protection key. */
	for (i = USER_BASE; i < 2 * USER_BASE; i += PAGE_SIZE) {
		*get_pte(phys_to_virt(read_cr3()), phys_to_virt(i)) &= ~USER_BASE;
		*get_pte(phys_to_virt(read_cr3()), phys_to_virt(i)) |= ((unsigned long)pkey << PTE_PKEY_BIT);
		invlpg((void *)i);
	}

	/* Enable protection keys and reload CR3 to flush stale translations. */
	write_cr4(read_cr4() | X86_CR4_PKE);
	write_cr3(read_cr3());

	/* Supervisor pages: the expected pf_count == 0 below shows that
	 * neither AD nor WD applies, whatever the state of CR0.WP. */
	init_test();
	set_cr0_wp(1);
	write_pkru(pkru_ad);
	test = 21;
	report("write to supervisor page when pkru is ad and wp == 1", pf_count == 0 && test == 21);

	init_test();
	set_cr0_wp(0);
	write_pkru(pkru_ad);
	test = 22;
	report("write to supervisor page when pkru is ad and wp == 0", pf_count == 0 && test == 22);

	init_test();
	set_cr0_wp(1);
	write_pkru(pkru_wd);
	test = 23;
	report("write to supervisor page when pkru is wd and wp == 1", pf_count == 0 && test == 23);

	init_test();
	set_cr0_wp(0);
	write_pkru(pkru_wd);
	test = 24;
	report("write to supervisor page when pkru is wd and wp == 0", pf_count == 0 && test == 24);

	/* User page, WD set, WP clear: the supervisor write is expected to
	 * go through without a fault. */
	init_test();
	write_pkru(pkru_wd);
	set_cr0_wp(0);
	USER_VAR(test) = 25;
	report("write to user page when pkru is wd and wp == 0", pf_count == 0 && test == 25);

	/* User page, WD set, WP set: expect exactly one #PF; the handler
	 * saved the previous value (25) and cleared PKRU, after which the
	 * retried write stored 26. */
	init_test();
	write_pkru(pkru_wd);
	set_cr0_wp(1);
	USER_VAR(test) = 26;
	report("write to user page when pkru is wd and wp == 1", pf_count == 1 && test == 26 && save == 25);

	/* User page, AD set: even a read is expected to fault once. */
	init_test();
	write_pkru(pkru_ad);
	(void)USER_VAR(test);
	report("read from user page when pkru is ad", pf_count == 1 && save == 26);

	// TODO: implicit kernel access from ring 3 (e.g. int)

	return report_summary();
}