#include "libcflat.h"
#include "processor.h"
#include "msr.h"
#include "isr.h"
#include "vm.h"
#include "apic.h"
#include "desc.h"
#include "smp.h"
#include "atomic.h"
#include "hyperv.h"

#define MAX_CPUS 4

#define SINT1_VEC 0xF1
#define SINT2_VEC 0xF2

#define SINT1_NUM 2
#define SINT2_NUM 3
#define ONE_MS_IN_100NS 10000

static atomic_t g_cpus_comp_count;
static int g_cpus_count;
static struct spinlock g_synic_alloc_lock;

struct stimer {
    int sint;
    int index;
    atomic_t fire_count;
};

struct svcpu {
    int vcpu;
    void *msg_page;
    void *evt_page;
    struct stimer timer[HV_SYNIC_STIMER_COUNT];
};

static struct svcpu g_synic_vcpu[MAX_CPUS];

static void *synic_alloc_page(void)
{
    void *page;

    spin_lock(&g_synic_alloc_lock);
    page = alloc_page();
    spin_unlock(&g_synic_alloc_lock);
    return page;
}

static void synic_free_page(void *page)
{
    spin_lock(&g_synic_alloc_lock);
    free_page(page);
    spin_unlock(&g_synic_alloc_lock);
}

static void stimer_init(struct stimer *timer, int index)
{
    memset(timer, 0, sizeof(*timer));
    timer->index = index;
}

static void synic_enable(void)
{
    int vcpu = smp_id(), i;
    struct svcpu *svcpu = &g_synic_vcpu[vcpu];

    memset(svcpu, 0, sizeof(*svcpu));
    svcpu->vcpu = vcpu;
    svcpu->msg_page = synic_alloc_page();
    for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
        stimer_init(&svcpu->timer[i], i);
    }
    wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(svcpu->msg_page) |
          HV_SYNIC_SIMP_ENABLE);
    wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);
}

static void stimer_shutdown(struct stimer *timer)
{
    wrmsr(HV_X64_MSR_STIMER0_CONFIG + 2*timer->index, 0);
}

static void process_stimer_expired(struct svcpu *svcpu, struct stimer *timer,
                                   u64 expiration_time, u64 delivery_time)
{
    atomic_inc(&timer->fire_count);
}

static void process_stimer_msg(struct svcpu *svcpu,
                               struct hv_message *msg, int sint)
{
    struct hv_timer_message_payload *payload =
        (struct hv_timer_message_payload *)msg->u.payload;
    struct stimer *timer;

    if (msg->header.message_type != HVMSG_TIMER_EXPIRED &&
        msg->header.message_type != HVMSG_NONE) {
        report("invalid Hyper-V SynIC msg type", false);
        report_summary();
        abort();
    }

    if (msg->header.message_type == HVMSG_NONE) {
        return;
    }

    if (msg->header.payload_size < sizeof(*payload)) {
        report("invalid Hyper-V SynIC msg payload size", false);
        report_summary();
        abort();
    }

    /* Now process timer expiration message */

    if (payload->timer_index >= ARRAY_SIZE(svcpu->timer)) {
        report("invalid Hyper-V SynIC timer index", false);
        report_summary();
        abort();
    }
    timer = &svcpu->timer[payload->timer_index];
    process_stimer_expired(svcpu, timer, payload->expiration_time,
                           payload->delivery_time);

    msg->header.message_type = HVMSG_NONE;
    mb();
    if (msg->header.message_flags.msg_pending) {
        wrmsr(HV_X64_MSR_EOM, 0);
    }
}

static void __stimer_isr(int vcpu)
{
    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
    struct hv_message_page *msg_page;
    struct hv_message *msg;
    int i;

    msg_page = (struct hv_message_page *)svcpu->msg_page;
    for (i = 0; i < ARRAY_SIZE(msg_page->sint_message); i++) {
        msg = &msg_page->sint_message[i];
        process_stimer_msg(svcpu, msg, i);
    }
}

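/*
 * Interrupt handlers for the two SINT vectors.  stimer_isr() issues an
 * explicit APIC EOI, while stimer_isr_auto_eoi() deliberately omits it:
 * SINT2 is created with auto-EOI enabled in stimer_test_prepare(), so the
 * EOI is performed implicitly when the interrupt is delivered.
 */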
static void stimer_isr(isr_regs_t *regs)
{
    int vcpu = smp_id();

    __stimer_isr(vcpu);
    eoi();
}

static void stimer_isr_auto_eoi(isr_regs_t *regs)
{
    int vcpu = smp_id();

    __stimer_isr(vcpu);
}

static void stimer_start(struct stimer *timer,
                         bool auto_enable, bool periodic,
                         u64 tick_100ns, int sint)
{
    u64 config, count;

    timer->sint = sint;
    atomic_set(&timer->fire_count, 0);

    config = 0;
    if (periodic) {
        config |= HV_STIMER_PERIODIC;
    }

    /* Route expiration messages to the requested SINT (SINTx field at bit 16). */
    config |= ((u8)(sint & 0xFF)) << 16;
    config |= HV_STIMER_ENABLE;
    if (auto_enable) {
        config |= HV_STIMER_AUTOENABLE;
    }

    if (periodic) {
        count = tick_100ns;
    } else {
        count = rdmsr(HV_X64_MSR_TIME_REF_COUNT) + tick_100ns;
    }

    /*
     * Without AUTOENABLE the count must be programmed before the timer is
     * enabled through the config MSR; with AUTOENABLE the order is
     * reversed, since writing the count is what arms the timer.
     */
    if (!auto_enable) {
        wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
        wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
    } else {
        wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
        wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
    }
}

static void stimers_shutdown(void)
{
    int vcpu = smp_id(), i;
    struct svcpu *svcpu = &g_synic_vcpu[vcpu];

    for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
        stimer_shutdown(&svcpu->timer[i]);
    }
}

static void synic_disable(void)
{
    int vcpu = smp_id();
    struct svcpu *svcpu = &g_synic_vcpu[vcpu];

    wrmsr(HV_X64_MSR_SCONTROL, 0);
    wrmsr(HV_X64_MSR_SIMP, 0);
    wrmsr(HV_X64_MSR_SIEFP, 0);
    synic_free_page(svcpu->msg_page);
}

static void cpu_comp(void)
{
    atomic_inc(&g_cpus_comp_count);
}

static void stimer_test_prepare(void *ctx)
{
    int vcpu = smp_id();

    write_cr3((ulong)ctx);
    synic_enable();
    synic_sint_create(vcpu, SINT1_NUM, SINT1_VEC, false);
    synic_sint_create(vcpu, SINT2_NUM, SINT2_VEC, true);
    cpu_comp();
}

static void stimer_test_periodic(int vcpu, struct stimer *timer1,
                                 struct stimer *timer2)
{
    /* Check periodic timers */
    stimer_start(timer1, false, true, ONE_MS_IN_100NS, SINT1_NUM);
    stimer_start(timer2, false, true, ONE_MS_IN_100NS, SINT2_NUM);
    while ((atomic_read(&timer1->fire_count) < 1000) ||
           (atomic_read(&timer2->fire_count) < 1000)) {
        pause();
    }
    report("Hyper-V SynIC periodic timers test vcpu %d", true, vcpu);
    stimer_shutdown(timer1);
    stimer_shutdown(timer2);
}

static void stimer_test_one_shot(int vcpu, struct stimer *timer)
{
    /* Check one-shot timer */
    stimer_start(timer, false, false, ONE_MS_IN_100NS, SINT1_NUM);
    while (atomic_read(&timer->fire_count) < 1) {
        pause();
    }
    report("Hyper-V SynIC one-shot test vcpu %d", true, vcpu);
    stimer_shutdown(timer);
}

static void stimer_test_auto_enable_one_shot(int vcpu, struct stimer *timer)
{
    /* Check auto-enable one-shot timer */
    stimer_start(timer, true, false, ONE_MS_IN_100NS, SINT1_NUM);
    while (atomic_read(&timer->fire_count) < 1) {
        pause();
    }
    report("Hyper-V SynIC auto-enable one-shot timer test vcpu %d", true, vcpu);
    stimer_shutdown(timer);
}

static void stimer_test_auto_enable_periodic(int vcpu, struct stimer *timer)
{
    /* Check auto-enable periodic timer */
    stimer_start(timer, true, true, ONE_MS_IN_100NS, SINT1_NUM);
    while (atomic_read(&timer->fire_count) < 1000) {
        pause();
    }
    report("Hyper-V SynIC auto-enable periodic timer test vcpu %d", true, vcpu);
    stimer_shutdown(timer);
}

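/*
 * Per-vCPU test body, run on every CPU via on_each_cpu_async_wait(): with
 * interrupts enabled it exercises the periodic, one-shot, auto-enable
 * one-shot and auto-enable periodic timer variants.  The periodic case
 * arms one timer on SINT1 and one on SINT2, so both the explicit-EOI and
 * the auto-EOI interrupt paths are covered.
 */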
static void stimer_test(void *ctx)
{
    int vcpu = smp_id();
    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
    struct stimer *timer1, *timer2;

    irq_enable();

    timer1 = &svcpu->timer[0];
    timer2 = &svcpu->timer[1];

    stimer_test_periodic(vcpu, timer1, timer2);
    stimer_test_one_shot(vcpu, timer1);
    stimer_test_auto_enable_one_shot(vcpu, timer2);
    stimer_test_auto_enable_periodic(vcpu, timer1);

    irq_disable();
    cpu_comp();
}

static void stimer_test_cleanup(void *ctx)
{
    int vcpu = smp_id();

    stimers_shutdown();
    synic_sint_destroy(vcpu, SINT1_NUM);
    synic_sint_destroy(vcpu, SINT2_NUM);
    synic_disable();
    cpu_comp();
}

static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
{
    int i;

    atomic_set(&g_cpus_comp_count, 0);
    for (i = 0; i < g_cpus_count; i++) {
        on_cpu_async(i, func, ctx);
    }
    while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
        pause();
    }
}

static void stimer_test_all(void)
{
    int ncpus;

    setup_vm();
    smp_init();
    setup_idt();
    enable_apic();

    handle_irq(SINT1_VEC, stimer_isr);
    handle_irq(SINT2_VEC, stimer_isr_auto_eoi);

    ncpus = cpu_count();
    if (ncpus > MAX_CPUS) {
        ncpus = MAX_CPUS;
    }

    printf("cpus = %d\n", ncpus);
    g_cpus_count = ncpus;

    on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
    on_each_cpu_async_wait(stimer_test, NULL);
    on_each_cpu_async_wait(stimer_test_cleanup, NULL);
}

int main(int ac, char **av)
{
    if (!synic_supported()) {
        report("Hyper-V SynIC is not supported", true);
        goto done;
    }

    if (!stimer_supported()) {
        report("Hyper-V SynIC timers are not supported", true);
        goto done;
    }

    if (!hv_time_ref_counter_supported()) {
        report("Hyper-V time reference counter is not supported", true);
        goto done;
    }

    stimer_test_all();
done:
    return report_summary();
}
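
/*
 * Note: main() bails out early (reporting the checks as passed) unless the
 * hypervisor advertises the Hyper-V SynIC, synthetic timer and time
 * reference counter features.  With QEMU/KVM these are typically exposed
 * through the hv-synic, hv-stimer, hv-time and hv-vpindex CPU flags, and
 * the SINT routing helpers (synic_sint_create()/synic_sint_destroy()) are
 * usually backed by the hyperv-testdev device; the exact command line
 * depends on the test harness and QEMU version.
 */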