#include "libcflat.h"
#include "processor.h"
#include "msr.h"
#include "isr.h"
#include "vm.h"
#include "apic.h"
#include "desc.h"
#include "smp.h"
#include "atomic.h"
#include "hyperv.h"
#include "asm/barrier.h"

#define MAX_CPUS 4

/* APIC vectors and SINT numbers used by the test */
#define SINT1_VEC 0xF1
#define SINT2_VEC 0xF2

#define SINT1_NUM 2
#define SINT2_NUM 3
#define ONE_MS_IN_100NS 10000

static atomic_t g_cpus_comp_count;
static int g_cpus_count;
static struct spinlock g_synic_alloc_lock;

struct stimer {
        int sint;
        int index;
        atomic_t fire_count;
};

struct svcpu {
        int vcpu;
        void *msg_page;
        void *evt_page;
        struct stimer timer[HV_SYNIC_STIMER_COUNT];
};

static struct svcpu g_synic_vcpu[MAX_CPUS];

/* Serialize page allocation and freeing across vCPUs. */
static void *synic_alloc_page(void)
{
        void *page;

        spin_lock(&g_synic_alloc_lock);
        page = alloc_page();
        spin_unlock(&g_synic_alloc_lock);
        return page;
}

static void synic_free_page(void *page)
{
        spin_lock(&g_synic_alloc_lock);
        free_page(page);
        spin_unlock(&g_synic_alloc_lock);
}

static void stimer_init(struct stimer *timer, int index)
{
        memset(timer, 0, sizeof(*timer));
        timer->index = index;
}

/* Enable the SynIC on the calling vCPU and install its message page. */
static void synic_enable(void)
{
        int vcpu = smp_id(), i;
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];

        memset(svcpu, 0, sizeof(*svcpu));
        svcpu->vcpu = vcpu;
        svcpu->msg_page = synic_alloc_page();
        for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
                stimer_init(&svcpu->timer[i], i);
        }
        wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(svcpu->msg_page) |
              HV_SYNIC_SIMP_ENABLE);
        wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);
}

static void stimer_shutdown(struct stimer *timer)
{
        wrmsr(HV_X64_MSR_STIMER0_CONFIG + 2*timer->index, 0);
}

static void process_stimer_expired(struct svcpu *svcpu, struct stimer *timer,
                                   u64 expiration_time, u64 delivery_time)
{
        atomic_inc(&timer->fire_count);
}

/*
 * Handle one SINT message slot: skip empty slots and abort on anything
 * other than a timer expiration message.
 */
static void process_stimer_msg(struct svcpu *svcpu,
                               struct hv_message *msg, int sint)
{
        struct hv_timer_message_payload *payload =
                (struct hv_timer_message_payload *)msg->u.payload;
        struct stimer *timer;

        if (msg->header.message_type != HVMSG_TIMER_EXPIRED &&
            msg->header.message_type != HVMSG_NONE) {
                report("invalid Hyper-V SynIC msg type", false);
                report_summary();
                abort();
        }

        if (msg->header.message_type == HVMSG_NONE) {
                return;
        }

        if (msg->header.payload_size < sizeof(*payload)) {
                report("invalid Hyper-V SynIC msg payload size", false);
                report_summary();
                abort();
        }

        /* Now process timer expiration message */

        if (payload->timer_index >= ARRAY_SIZE(svcpu->timer)) {
                report("invalid Hyper-V SynIC timer index", false);
                report_summary();
                abort();
        }
        timer = &svcpu->timer[payload->timer_index];
        process_stimer_expired(svcpu, timer, payload->expiration_time,
                               payload->delivery_time);

        /* Release the slot; request redelivery if another message is pending. */
        msg->header.message_type = HVMSG_NONE;
        mb();
        if (msg->header.message_flags.msg_pending) {
                wrmsr(HV_X64_MSR_EOM, 0);
        }
}

/* Walk every SINT message slot of this vCPU's message page. */
static void __stimer_isr(int vcpu)
{
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];
        struct hv_message_page *msg_page;
        struct hv_message *msg;
        int i;

        msg_page = (struct hv_message_page *)svcpu->msg_page;
        for (i = 0; i < ARRAY_SIZE(msg_page->sint_message); i++) {
                msg = &msg_page->sint_message[i];
                process_stimer_msg(svcpu, msg, i);
        }
}

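/*
 * SINT1 is created without auto-EOI (see stimer_test_prepare), so its ISR
 * must issue an explicit APIC EOI; SINT2 is created with auto-EOI, so its
 * ISR skips it.
 */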
static void stimer_isr(isr_regs_t *regs)
{
        int vcpu = smp_id();

        __stimer_isr(vcpu);
        eoi();
}

static void stimer_isr_auto_eoi(isr_regs_t *regs)
{
        int vcpu = smp_id();

        __stimer_isr(vcpu);
}

static void stimer_start(struct stimer *timer,
                         bool auto_enable, bool periodic,
                         u64 tick_100ns, int sint)
{
        u64 config, count;

        timer->sint = sint;
        atomic_set(&timer->fire_count, 0);

        config = 0;
        if (periodic) {
                config |= HV_STIMER_PERIODIC;
        }

        /* Route expiration messages to the requested SINT. */
        config |= ((u8)(sint & 0xFF)) << 16;
        config |= HV_STIMER_ENABLE;
        if (auto_enable) {
                config |= HV_STIMER_AUTOENABLE;
        }

        if (periodic) {
                count = tick_100ns;
        } else {
                count = rdmsr(HV_X64_MSR_TIME_REF_COUNT) + tick_100ns;
        }

        /*
         * Each timer exposes a CONFIG/COUNT MSR pair, hence the stride of 2.
         * Writing a non-zero count arms an auto-enable timer, so program the
         * config first in that case; otherwise set the count before enabling
         * the timer via its config MSR.
         */
        if (!auto_enable) {
                wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
                wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
        } else {
                wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
                wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
        }
}

static void stimers_shutdown(void)
{
        int vcpu = smp_id(), i;
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];

        for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
                stimer_shutdown(&svcpu->timer[i]);
        }
}

static void synic_disable(void)
{
        int vcpu = smp_id();
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];

        wrmsr(HV_X64_MSR_SCONTROL, 0);
        wrmsr(HV_X64_MSR_SIMP, 0);
        wrmsr(HV_X64_MSR_SIEFP, 0);
        synic_free_page(svcpu->msg_page);
}

static void cpu_comp(void)
{
        atomic_inc(&g_cpus_comp_count);
}

static void stimer_test_prepare(void *ctx)
{
        int vcpu = smp_id();

        write_cr3((ulong)ctx);
        synic_enable();
        synic_sint_create(vcpu, SINT1_NUM, SINT1_VEC, false);
        synic_sint_create(vcpu, SINT2_NUM, SINT2_VEC, true);
        cpu_comp();
}

static void stimer_test_periodic(int vcpu, struct stimer *timer1,
                                 struct stimer *timer2)
{
        /* Check periodic timers */
        stimer_start(timer1, false, true, ONE_MS_IN_100NS, SINT1_NUM);
        stimer_start(timer2, false, true, ONE_MS_IN_100NS, SINT2_NUM);
        while ((atomic_read(&timer1->fire_count) < 1000) ||
               (atomic_read(&timer2->fire_count) < 1000)) {
                pause();
        }
        report("Hyper-V SynIC periodic timers test vcpu %d", true, vcpu);
        stimer_shutdown(timer1);
        stimer_shutdown(timer2);
}

static void stimer_test_one_shot(int vcpu, struct stimer *timer)
{
        /* Check one-shot timer */
        stimer_start(timer, false, false, ONE_MS_IN_100NS, SINT1_NUM);
        while (atomic_read(&timer->fire_count) < 1) {
                pause();
        }
        report("Hyper-V SynIC one-shot test vcpu %d", true, vcpu);
        stimer_shutdown(timer);
}

static void stimer_test_auto_enable_one_shot(int vcpu, struct stimer *timer)
{
        /* Check auto-enable one-shot timer */
        stimer_start(timer, true, false, ONE_MS_IN_100NS, SINT1_NUM);
        while (atomic_read(&timer->fire_count) < 1) {
                pause();
        }
        report("Hyper-V SynIC auto-enable one-shot timer test vcpu %d", true, vcpu);
        stimer_shutdown(timer);
}

static void stimer_test_auto_enable_periodic(int vcpu, struct stimer *timer)
{
        /* Check auto-enable periodic timer */
        stimer_start(timer, true, true, ONE_MS_IN_100NS, SINT1_NUM);
        while (atomic_read(&timer->fire_count) < 1000) {
                pause();
        }
        report("Hyper-V SynIC auto-enable periodic timer test vcpu %d",
               true, vcpu);
        stimer_shutdown(timer);
}

static void stimer_test(void *ctx)
{
        int vcpu = smp_id();
        struct svcpu *svcpu = &g_synic_vcpu[vcpu];
        struct stimer *timer1, *timer2;

        irq_enable();

        timer1 = &svcpu->timer[0];
        timer2 = &svcpu->timer[1];

        stimer_test_periodic(vcpu, timer1, timer2);
        stimer_test_one_shot(vcpu, timer1);
        stimer_test_auto_enable_one_shot(vcpu, timer2);
        stimer_test_auto_enable_periodic(vcpu, timer1);

        irq_disable();
        cpu_comp();
}

static void stimer_test_cleanup(void *ctx)
{
        int vcpu = smp_id();

        stimers_shutdown();
        synic_sint_destroy(vcpu, SINT1_NUM);
        synic_sint_destroy(vcpu, SINT2_NUM);
        synic_disable();
        cpu_comp();
}

/* Run func on each vCPU under test and wait until all signal completion. */
static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
{
        int i;

        atomic_set(&g_cpus_comp_count, 0);
        for (i = 0; i < g_cpus_count; i++) {
                on_cpu_async(i, func, ctx);
        }
        while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
                pause();
        }
}

static void stimer_test_all(void)
{
        int ncpus;

        setup_vm();
        smp_init();
        setup_idt();
        enable_apic();

        handle_irq(SINT1_VEC, stimer_isr);
        handle_irq(SINT2_VEC, stimer_isr_auto_eoi);

        ncpus = cpu_count();
        if (ncpus > MAX_CPUS) {
                ncpus = MAX_CPUS;
        }

        printf("cpus = %d\n", ncpus);
        g_cpus_count = ncpus;

        on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
        on_each_cpu_async_wait(stimer_test, NULL);
        on_each_cpu_async_wait(stimer_test_cleanup, NULL);
}

int main(int ac, char **av)
{
        if (!synic_supported()) {
                report("Hyper-V SynIC is not supported", true);
                goto done;
        }

        if (!stimer_supported()) {
                report("Hyper-V SynIC timers are not supported", true);
                goto done;
        }

        if (!hv_time_ref_counter_supported()) {
                report("Hyper-V time reference counter is not supported", true);
                goto done;
        }

        stimer_test_all();
done:
        return report_summary();
}