1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <fcntl.h>
3 #include <stdatomic.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <sys/ioctl.h>
8 #include <unistd.h>
9
10 #include "apic.h"
11 #include "kvm_util.h"
12 #include "processor.h"
13 #include "test_util.h"
14
/* Selects xAPIC vs. x2APIC mode for the current run; synced into the guest. */
static bool is_x2apic;

/* Self-IPI vector; vectors 0-15 are reserved, see static_assert below. */
#define IRQ_VECTOR 0x20

/* See also the comment at similar assertion in memslot_perf_test.c */
static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");

/* Number of guest IRQ handler invocations; polled by guest code to sync. */
static atomic_uint tpr_guest_irq_sync_val;
23
/* Reset the IRQ counter to zero (release ordering publishes the reset). */
static void tpr_guest_irq_sync_flag_reset(void)
{
	atomic_store_explicit(&tpr_guest_irq_sync_val, 0,
			      memory_order_release);
}
29
/* Read how many IRQs the guest handler has observed so far. */
static unsigned int tpr_guest_irq_sync_val_get(void)
{
	return atomic_load_explicit(&tpr_guest_irq_sync_val,
				    memory_order_acquire);
}
35
/* Bump the IRQ counter; called from the interrupt handlers. */
static void tpr_guest_irq_sync_val_inc(void)
{
	atomic_fetch_add_explicit(&tpr_guest_irq_sync_val, 1,
				  memory_order_acq_rel);
}
41
/* xAPIC IRQ handler: record the interrupt, then ack it via MMIO EOI. */
static void tpr_guest_irq_handler_xapic(struct ex_regs *regs)
{
	tpr_guest_irq_sync_val_inc();

	xapic_write_reg(APIC_EOI, 0);
}
48
/* x2APIC IRQ handler: record the interrupt, then ack it via the EOI MSR. */
static void tpr_guest_irq_handler_x2apic(struct ex_regs *regs)
{
	tpr_guest_irq_sync_val_inc();

	x2apic_write_reg(APIC_EOI, 0);
}
55
tpr_guest_irq_queue(void)56 static void tpr_guest_irq_queue(void)
57 {
58 if (is_x2apic) {
59 x2apic_write_reg(APIC_SELF_IPI, IRQ_VECTOR);
60 } else {
61 uint32_t icr, icr2;
62
63 icr = APIC_DEST_SELF | APIC_DEST_PHYSICAL | APIC_DM_FIXED |
64 IRQ_VECTOR;
65 icr2 = 0;
66
67 xapic_write_reg(APIC_ICR2, icr2);
68 xapic_write_reg(APIC_ICR, icr);
69 }
70 }
71
tpr_guest_tpr_get(void)72 static uint8_t tpr_guest_tpr_get(void)
73 {
74 uint32_t taskpri;
75
76 if (is_x2apic)
77 taskpri = x2apic_read_reg(APIC_TASKPRI);
78 else
79 taskpri = xapic_read_reg(APIC_TASKPRI);
80
81 return GET_APIC_PRI(taskpri);
82 }
83
tpr_guest_ppr_get(void)84 static uint8_t tpr_guest_ppr_get(void)
85 {
86 uint32_t procpri;
87
88 if (is_x2apic)
89 procpri = x2apic_read_reg(APIC_PROCPRI);
90 else
91 procpri = xapic_read_reg(APIC_PROCPRI);
92
93 return GET_APIC_PRI(procpri);
94 }
95
/* Read the priority class mirrored in CR8 (low 4 bits hold the class). */
static uint8_t tpr_guest_cr8_get(void)
{
	uint64_t cr8;

	asm volatile ("mov %%cr8, %[cr8]\n\t" : [cr8] "=r"(cr8));

	return cr8 & GENMASK(3, 0);
}
104
/*
 * With no IRQ in service, PPR must equal TPR, and CR8 must mirror the TPR
 * priority class; assert all three views agree.
 */
static void tpr_guest_check_tpr_ppr_cr8_equal(void)
{
	uint8_t expected = tpr_guest_tpr_get();

	GUEST_ASSERT_EQ(tpr_guest_ppr_get(), expected);
	GUEST_ASSERT_EQ(tpr_guest_cr8_get(), expected);
}
114
/*
 * Guest entry point: verify that TPR, PPR and CR8 stay consistent while an
 * IRQ is queued and the masking threshold is moved by the host (via
 * GUEST_SYNC) around the IRQ's priority class.
 */
static void tpr_guest_code(void)
{
	cli();

	if (is_x2apic)
		x2apic_enable();
	else
		xapic_enable();

	/* TPR must be 0 out of reset/enable. */
	GUEST_ASSERT_EQ(tpr_guest_tpr_get(), 0);
	tpr_guest_check_tpr_ppr_cr8_equal();

	tpr_guest_irq_queue();

	/* TPR = 0 but IRQ masked by IF=0, should not fire */
	udelay(1000);
	GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 0);

	sti();

	/* IF=1 now, IRQ should fire */
	while (tpr_guest_irq_sync_val_get() == 0)
		cpu_relax();
	GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 1);

	/* Host raises TPR just high enough to mask IRQ_VECTOR (mask=true). */
	GUEST_SYNC(true);
	tpr_guest_check_tpr_ppr_cr8_equal();

	tpr_guest_irq_queue();

	/* IRQ masked by barely high enough TPR now, should not fire */
	udelay(1000);
	GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 1);

	/* Host lowers TPR one class, unmasking IRQ_VECTOR (mask=false). */
	GUEST_SYNC(false);
	tpr_guest_check_tpr_ppr_cr8_equal();

	/* TPR barely low enough now to unmask IRQ, should fire */
	while (tpr_guest_irq_sync_val_get() == 1)
		cpu_relax();
	GUEST_ASSERT_EQ(tpr_guest_irq_sync_val_get(), 2);

	GUEST_DONE();
}
159
lapic_tpr_get(struct kvm_lapic_state * xapic)160 static uint8_t lapic_tpr_get(struct kvm_lapic_state *xapic)
161 {
162 return GET_APIC_PRI(*((u32 *)&xapic->regs[APIC_TASKPRI]));
163 }
164
lapic_tpr_set(struct kvm_lapic_state * xapic,uint8_t val)165 static void lapic_tpr_set(struct kvm_lapic_state *xapic, uint8_t val)
166 {
167 u32 *taskpri = (u32 *)&xapic->regs[APIC_TASKPRI];
168
169 *taskpri = SET_APIC_PRI(*taskpri, val);
170 }
171
/* CR8 mirrors the TPR priority class in its low 4 bits. */
static uint8_t sregs_tpr(struct kvm_sregs *sregs)
{
	return sregs->cr8 & GENMASK(3, 0);
}
176
test_tpr_check_tpr_zero(struct kvm_vcpu * vcpu)177 static void test_tpr_check_tpr_zero(struct kvm_vcpu *vcpu)
178 {
179 struct kvm_lapic_state xapic;
180
181 vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
182
183 TEST_ASSERT_EQ(lapic_tpr_get(&xapic), 0);
184 }
185
test_tpr_check_tpr_cr8_equal(struct kvm_vcpu * vcpu)186 static void test_tpr_check_tpr_cr8_equal(struct kvm_vcpu *vcpu)
187 {
188 struct kvm_sregs sregs;
189 struct kvm_lapic_state xapic;
190
191 vcpu_sregs_get(vcpu, &sregs);
192 vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
193
194 TEST_ASSERT_EQ(sregs_tpr(&sregs), lapic_tpr_get(&xapic));
195 }
196
test_tpr_set_tpr_for_irq(struct kvm_vcpu * vcpu,bool mask)197 static void test_tpr_set_tpr_for_irq(struct kvm_vcpu *vcpu, bool mask)
198 {
199 struct kvm_lapic_state xapic;
200 uint8_t tpr;
201
202 static_assert(IRQ_VECTOR >= 16, "invalid IRQ vector number");
203 tpr = IRQ_VECTOR / 16;
204 if (!mask)
205 tpr--;
206
207 vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
208 lapic_tpr_set(&xapic, tpr);
209 vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
210 }
211
/*
 * Run the TPR consistency test in either xAPIC or x2APIC mode.  Creates a
 * fresh VM, installs the matching IRQ handler, then services GUEST_SYNC
 * requests from the guest to move the TPR threshold around IRQ_VECTOR.
 */
static void test_tpr(bool __is_x2apic)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	bool done = false;

	is_x2apic = __is_x2apic;

	vm = vm_create_with_one_vcpu(&vcpu, tpr_guest_code);
	if (is_x2apic) {
		vm_install_exception_handler(vm, IRQ_VECTOR,
					     tpr_guest_irq_handler_x2apic);
	} else {
		vm_install_exception_handler(vm, IRQ_VECTOR,
					     tpr_guest_irq_handler_xapic);
		/* Hide x2APIC entirely so the guest exercises pure xAPIC. */
		vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_X2APIC);
		/* Identity-map the xAPIC MMIO page for guest register access. */
		virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
	}

	sync_global_to_guest(vcpu->vm, is_x2apic);

	/* According to the SDM/APM the TPR value at reset is 0 */
	test_tpr_check_tpr_zero(vcpu);
	test_tpr_check_tpr_cr8_equal(vcpu);

	/* Start each run with a zeroed IRQ counter in the guest. */
	tpr_guest_irq_sync_flag_reset();
	sync_global_to_guest(vcpu->vm, tpr_guest_irq_sync_val);

	while (!done) {
		struct ucall uc;

		/* Guard against the guest spinning forever on a lost IRQ. */
		alarm(2);
		vcpu_run(vcpu);
		alarm(0);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			test_tpr_check_tpr_cr8_equal(vcpu);
			done = true;
			break;
		case UCALL_SYNC:
			test_tpr_check_tpr_cr8_equal(vcpu);
			/* uc.args[1] is the guest's mask/unmask request. */
			test_tpr_set_tpr_for_irq(vcpu, uc.args[1]);
			break;
		default:
			TEST_FAIL("Unknown ucall result 0x%lx", uc.cmd);
			break;
		}
	}
	kvm_vm_free(vm);
}
266
int main(int argc, char *argv[])
{
	/*
	 * Use separate VMs for the xAPIC and x2APIC tests so that x2APIC can
	 * be fully hidden from the guest. KVM disallows changing CPUID after
	 * KVM_RUN and AVIC is disabled if _any_ vCPU is allowed to use x2APIC.
	 */
	test_tpr(false);
	test_tpr(true);
}
277