1 #include "libcflat.h" 2 #include "processor.h" 3 #include "msr.h" 4 #include "isr.h" 5 #include "vm.h" 6 #include "apic.h" 7 #include "desc.h" 8 #include "io.h" 9 #include "smp.h" 10 #include "atomic.h" 11 #include "hyperv.h" 12 13 #define MAX_CPUS 4 14 15 #define SINT1_VEC 0xF1 16 #define SINT2_VEC 0xF2 17 18 #define SINT1_NUM 2 19 #define SINT2_NUM 3 20 #define ONE_MS_IN_100NS 10000 21 22 static atomic_t g_cpus_comp_count; 23 static int g_cpus_count; 24 static struct spinlock g_synic_alloc_lock; 25 26 struct stimer { 27 int sint; 28 int index; 29 atomic_t fire_count; 30 }; 31 32 struct svcpu { 33 int vcpu; 34 void *msg_page; 35 void *evt_page; 36 struct stimer timer[HV_SYNIC_STIMER_COUNT]; 37 }; 38 39 static struct svcpu g_synic_vcpu[MAX_CPUS]; 40 41 static void *synic_alloc_page(void) 42 { 43 void *page; 44 45 spin_lock(&g_synic_alloc_lock); 46 page = alloc_page(); 47 spin_unlock(&g_synic_alloc_lock); 48 return page; 49 } 50 51 static void synic_free_page(void *page) 52 { 53 spin_lock(&g_synic_alloc_lock); 54 free_page(page); 55 spin_unlock(&g_synic_alloc_lock); 56 } 57 58 static void stimer_init(struct stimer *timer, int index) 59 { 60 memset(timer, 0, sizeof(*timer)); 61 timer->index = index; 62 } 63 64 static void synic_enable(void) 65 { 66 int vcpu = smp_id(), i; 67 struct svcpu *svcpu = &g_synic_vcpu[vcpu]; 68 69 memset(svcpu, 0, sizeof(*svcpu)); 70 svcpu->vcpu = vcpu; 71 svcpu->msg_page = synic_alloc_page(); 72 for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) { 73 stimer_init(&svcpu->timer[i], i); 74 } 75 wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(svcpu->msg_page) | 76 HV_SYNIC_SIMP_ENABLE); 77 wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE); 78 } 79 80 static void stimer_shutdown(struct stimer *timer) 81 { 82 wrmsr(HV_X64_MSR_STIMER0_CONFIG + 2*timer->index, 0); 83 } 84 85 static void process_stimer_expired(struct svcpu *svcpu, struct stimer *timer, 86 u64 expiration_time, u64 delivery_time) 87 { 88 atomic_inc(&timer->fire_count); 89 } 90 91 static void process_stimer_msg(struct svcpu *svcpu, 92 struct hv_message *msg, int sint) 93 { 94 struct hv_timer_message_payload *payload = 95 (struct hv_timer_message_payload *)msg->u.payload; 96 struct stimer *timer; 97 98 if (msg->header.message_type != HVMSG_TIMER_EXPIRED && 99 msg->header.message_type != HVMSG_NONE) { 100 report("invalid Hyper-V SynIC msg type", false); 101 report_summary(); 102 exit(-1); 103 return; 104 } 105 106 if (msg->header.message_type == HVMSG_NONE) { 107 return; 108 } 109 110 if (msg->header.payload_size < sizeof(*payload)) { 111 report("invalid Hyper-V SynIC msg payload size", false); 112 report_summary(); 113 exit(-1); 114 return; 115 } 116 117 /* Now process timer expiration message */ 118 119 if (payload->timer_index >= ARRAY_SIZE(svcpu->timer)) { 120 report("invalid Hyper-V SynIC timer index", false); 121 report_summary(); 122 exit(-1); 123 return; 124 } 125 timer = &svcpu->timer[payload->timer_index]; 126 process_stimer_expired(svcpu, timer, payload->expiration_time, 127 payload->delivery_time); 128 129 msg->header.message_type = HVMSG_NONE; 130 mb(); 131 if (msg->header.message_flags.msg_pending) { 132 wrmsr(HV_X64_MSR_EOM, 0); 133 } 134 } 135 136 static void __stimer_isr(int vcpu) 137 { 138 struct svcpu *svcpu = &g_synic_vcpu[vcpu]; 139 struct hv_message_page *msg_page; 140 struct hv_message *msg; 141 int i; 142 143 144 msg_page = (struct hv_message_page *)svcpu->msg_page; 145 for (i = 0; i < ARRAY_SIZE(msg_page->sint_message); i++) { 146 msg = &msg_page->sint_message[i]; 147 process_stimer_msg(svcpu, 
static void stimer_isr(isr_regs_t *regs)
{
        int vcpu = smp_id();

        __stimer_isr(vcpu);
        eoi();
}

static void stimer_isr_auto_eoi(isr_regs_t *regs)
{
        int vcpu = smp_id();

        /* SINT2 is created with auto-EOI, so no explicit EOI here */
        __stimer_isr(vcpu);
}

static void stimer_start(struct stimer *timer,
                         bool auto_enable, bool periodic,
                         u64 tick_100ns, int sint)
{
        u64 config, count;

        timer->sint = sint;
        atomic_set(&timer->fire_count, 0);

        config = 0;
        if (periodic) {
                config |= HV_STIMER_PERIODIC;
        }

        config |= ((u8)(sint & 0xFF)) << 16;
        config |= HV_STIMER_ENABLE;
        if (auto_enable) {
                config |= HV_STIMER_AUTOENABLE;
        }

        /*
         * Periodic timers take a period in COUNT; one-shot timers take an
         * absolute expiration time based on the partition reference counter.
         */
        if (periodic) {
                count = tick_100ns;
        } else {
                count = rdmsr(HV_X64_MSR_TIME_REF_COUNT) + tick_100ns;
        }

        /*
         * With AUTOENABLE the timer is armed by the write to COUNT, so
         * CONFIG must be programmed first; otherwise COUNT must be set
         * before ENABLE is written to CONFIG.
         */
        if (!auto_enable) {
                wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
                wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
        } else {
                wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
                wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
        }
}

static void stimers_shutdown(void)
{
        int vcpu = smp_id(), i;
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];

        for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
                stimer_shutdown(&svcpu->timer[i]);
        }
}

static void synic_disable(void)
{
        int vcpu = smp_id();
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];

        wrmsr(HV_X64_MSR_SCONTROL, 0);
        wrmsr(HV_X64_MSR_SIMP, 0);
        wrmsr(HV_X64_MSR_SIEFP, 0);
        synic_free_page(svcpu->msg_page);
}

static void cpu_comp(void)
{
        atomic_inc(&g_cpus_comp_count);
}

static void stimer_test_prepare(void *ctx)
{
        int vcpu = smp_id();

        write_cr3((ulong)ctx);
        synic_enable();
        synic_sint_create(vcpu, SINT1_NUM, SINT1_VEC, false);
        synic_sint_create(vcpu, SINT2_NUM, SINT2_VEC, true);
        cpu_comp();
}

static void stimer_test_periodic(int vcpu, struct stimer *timer1,
                                 struct stimer *timer2)
{
        /* Check periodic timers */
        stimer_start(timer1, false, true, ONE_MS_IN_100NS, SINT1_NUM);
        stimer_start(timer2, false, true, ONE_MS_IN_100NS, SINT2_NUM);
        while ((atomic_read(&timer1->fire_count) < 1000) ||
               (atomic_read(&timer2->fire_count) < 1000)) {
                pause();
        }
        report("Hyper-V SynIC periodic timers test vcpu %d", true, vcpu);
        stimer_shutdown(timer1);
        stimer_shutdown(timer2);
}

static void stimer_test_one_shot(int vcpu, struct stimer *timer)
{
        /* Check one-shot timer */
        stimer_start(timer, false, false, ONE_MS_IN_100NS, SINT1_NUM);
        while (atomic_read(&timer->fire_count) < 1) {
                pause();
        }
        report("Hyper-V SynIC one-shot test vcpu %d", true, vcpu);
        stimer_shutdown(timer);
}

static void stimer_test_auto_enable_one_shot(int vcpu, struct stimer *timer)
{
        /* Check auto-enable one-shot timer */
        stimer_start(timer, true, false, ONE_MS_IN_100NS, SINT1_NUM);
        while (atomic_read(&timer->fire_count) < 1) {
                pause();
        }
        report("Hyper-V SynIC auto-enable one-shot timer test vcpu %d",
               true, vcpu);
        stimer_shutdown(timer);
}

static void stimer_test_auto_enable_periodic(int vcpu, struct stimer *timer)
{
        /* Check auto-enable periodic timer */
        stimer_start(timer, true, true, ONE_MS_IN_100NS, SINT1_NUM);
        while (atomic_read(&timer->fire_count) < 1000) {
                pause();
        }
        report("Hyper-V SynIC auto-enable periodic timer test vcpu %d",
               true, vcpu);
        stimer_shutdown(timer);
}
static void stimer_test(void *ctx)
{
        int vcpu = smp_id();
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];
        struct stimer *timer1, *timer2;

        irq_enable();

        timer1 = &svcpu->timer[0];
        timer2 = &svcpu->timer[1];

        stimer_test_periodic(vcpu, timer1, timer2);
        stimer_test_one_shot(vcpu, timer1);
        stimer_test_auto_enable_one_shot(vcpu, timer2);
        stimer_test_auto_enable_periodic(vcpu, timer1);

        irq_disable();
        cpu_comp();
}

static void stimer_test_cleanup(void *ctx)
{
        int vcpu = smp_id();

        stimers_shutdown();
        synic_sint_destroy(vcpu, SINT1_NUM);
        synic_sint_destroy(vcpu, SINT2_NUM);
        synic_disable();
        cpu_comp();
}

/* Run func on every CPU under test and wait until all of them complete */
static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
{
        int i;

        atomic_set(&g_cpus_comp_count, 0);
        for (i = 0; i < g_cpus_count; i++) {
                on_cpu_async(i, func, ctx);
        }
        while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
                pause();
        }
}

static void stimer_test_all(void)
{
        int ncpus;

        setup_vm();
        smp_init();
        setup_idt();
        enable_apic();

        handle_irq(SINT1_VEC, stimer_isr);
        handle_irq(SINT2_VEC, stimer_isr_auto_eoi);

        ncpus = cpu_count();
        if (ncpus > MAX_CPUS) {
                ncpus = MAX_CPUS;
        }

        printf("cpus = %d\n", ncpus);
        g_cpus_count = ncpus;

        on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
        on_each_cpu_async_wait(stimer_test, NULL);
        on_each_cpu_async_wait(stimer_test_cleanup, NULL);
}

int main(int ac, char **av)
{
        /* Skip (and report success) if the host lacks the needed features */
        if (!synic_supported()) {
                report("Hyper-V SynIC is not supported", true);
                goto done;
        }

        if (!stimer_supported()) {
                report("Hyper-V SynIC timers are not supported", true);
                goto done;
        }

        if (!hv_time_ref_counter_supported()) {
                report("Hyper-V time reference counter is not supported", true);
                goto done;
        }

        stimer_test_all();
done:
        return report_summary();
}
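
/*
 * Illustrative only: an entry along these lines in x86/unittests.cfg is
 * what typically runs this test under QEMU/KVM. The file name, SMP count
 * and Hyper-V enlightenment flags below are assumptions and may differ
 * between kvm-unit-tests and QEMU versions (newer QEMU spells the flags
 * hv-time, hv-synic, hv-stimer):
 *
 *   [hyperv_stimer]
 *   file = hyperv_stimer.flat
 *   smp = 2
 *   extra_params = -cpu kvm64,hv_vpindex,hv_time,hv_synic,hv_stimer
 */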