Lines Matching +full:tsx +full:- +full:ctrl

5 #include "x86/apic-defs.h"
27 "add $(2f-1b), %%eax\n\t" \
38 "add $(2f-1b), %%rax\n\t" \
196 u32 eax = cntrs & (BIT_ULL(32) - 1); in __precise_loop()
228 * as the counts will vary depending on how many asynchronous VM-Exits in adjust_events_range()
273 return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 || in is_gp()
274 evt->ctr >= MSR_IA32_PMC0; in is_gp()
280 return cnt->ctr - (is_gp(cnt) ? pmu.msr_gp_counter_base : in event_to_global_idx()
281 (MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX)); in event_to_global_idx()
284 return (cnt->ctr - pmu.msr_gp_counter_base) / 2; in event_to_global_idx()
286 return cnt->ctr - pmu.msr_gp_counter_base; in event_to_global_idx()
295 if (gp_events[i].unit_sel == (cnt->config & 0xffff)) in get_counter_event()
298 unsigned int idx = cnt->ctr - MSR_CORE_PERF_FIXED_CTR0; in get_counter_event()
312 cnt->idx = event_to_global_idx(cnt); in global_enable()
313 wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) | BIT_ULL(cnt->idx)); in global_enable()
321 wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) & ~BIT_ULL(cnt->idx)); in global_disable()
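These fragments appear to come from an x86 PMU unit test: global_enable()/global_disable() above flip one bit per counter in the global-control MSR, using the index that event_to_global_idx() computes. As a standalone sketch (not the test's own code), on Intel's architectural PMU general-purpose counter i maps to bit i of IA32_PERF_GLOBAL_CTRL and fixed counter i to bit 32 + i; the helper below only builds that bit mask and touches no MSR.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Architectural layout of IA32_PERF_GLOBAL_CTRL: bit i enables GP counter i,
 * bit 32 + i enables fixed counter i. */
static uint64_t global_ctrl_bit(int is_fixed, unsigned int idx)
{
	return BIT_ULL(is_fixed ? 32 + idx : idx);
}

int main(void)
{
	uint64_t ctrl = 0;

	ctrl |= global_ctrl_bit(0, 1);	/* GP counter 1    -> bit 1  */
	ctrl |= global_ctrl_bit(1, 0);	/* fixed counter 0 -> bit 32 */
	printf("IA32_PERF_GLOBAL_CTRL mask: 0x%016llx\n", (unsigned long long)ctrl);
	return 0;
}
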
326 evt->count = count; in __start_event()
327 wrmsr(evt->ctr, evt->count); in __start_event()
330 evt->config | EVNTSEL_EN); in __start_event()
332 uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL); in __start_event() local
333 int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4; in __start_event()
336 if (evt->config & EVNTSEL_OS) in __start_event()
338 if (evt->config & EVNTSEL_USR) in __start_event()
340 if (evt->config & EVNTSEL_INT) in __start_event()
342 ctrl = (ctrl & ~(0xf << shift)) | (usrospmi << shift); in __start_event()
343 wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl); in __start_event()
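The fixed-counter path of __start_event() above packs the OS/USR/PMI enables into a 4-bit field at shift = counter index * 4 of IA32_FIXED_CTR_CTRL. A minimal sketch of just that field arithmetic, assuming the architectural encoding from the Intel SDM (bit 0 counts at CPL 0, bit 1 at CPL > 0, bit 3 raises a PMI on overflow); the FIXED_EN_* names are mine, not the test's.

#include <stdint.h>
#include <stdio.h>

/* Per-counter field in IA32_FIXED_CTR_CTRL (illustrative names):
 * bit 0 = count at CPL 0, bit 1 = count at CPL > 0, bit 3 = PMI on overflow. */
#define FIXED_EN_OS	(1u << 0)
#define FIXED_EN_USR	(1u << 1)
#define FIXED_EN_PMI	(1u << 3)

static uint64_t fixed_ctr_ctrl_set(uint64_t ctrl, unsigned int idx, uint32_t field)
{
	int shift = idx * 4;	/* one 4-bit field per fixed counter */

	return (ctrl & ~(0xfull << shift)) | ((uint64_t)field << shift);
}

int main(void)
{
	/* Count on fixed counter 1 in both rings and ask for an overflow PMI. */
	uint64_t ctrl = fixed_ctr_ctrl_set(0, 1, FIXED_EN_OS | FIXED_EN_USR | FIXED_EN_PMI);

	printf("IA32_FIXED_CTR_CTRL: 0x%016llx\n", (unsigned long long)ctrl);
	return 0;
}
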
358 evt->config & ~EVNTSEL_EN); in __stop_event()
360 uint32_t ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL); in __stop_event() local
361 int shift = (evt->ctr - MSR_CORE_PERF_FIXED_CTR0) * 4; in __stop_event()
362 wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, ctrl & ~(0xf << shift)); in __stop_event()
364 evt->count = rdmsr(evt->ctr); in __stop_event()
408 pass = count >= e->min && count <= e->max; in verify_event()
410 printf("FAIL: %d <= %"PRId64" <= %d\n", e->min, count, e->max); in verify_event()
417 return verify_event(cnt->count, get_counter_event(cnt)); in verify_counter()
423 .config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel, in check_gp_counter()
430 report(verify_event(cnt.count, evt), "%s-%d", evt->name, i); in check_gp_counter()
456 report(verify_event(cnt.count, &fixed_events[i]), "fixed-%d", i); in check_fixed_counters()
500 assert(cnt->count > 1); in measure_for_overflow()
501 return 1 - cnt->count; in measure_for_overflow()
531 cnt.count &= (1ull << pmu.gp_counter_width) - 1; in check_counter_overflow()
539 cnt.count &= (1ull << pmu.gp_counter_width) - 1; in check_counter_overflow()
551 report(cnt.count == 1, "cntr-%d", i); in check_counter_overflow()
553 report(cnt.count == 0xffffffffffff || cnt.count < 7, "cntr-%d", i); in check_counter_overflow()
559 report(status & (1ull << idx), "status-%d", i); in check_counter_overflow()
562 report(!(status & (1ull << idx)), "status clear-%d", i); in check_counter_overflow()
563 report(check_irq() == (i % 2), "irq-%d", i); in check_counter_overflow()
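check_counter_overflow() preloads each counter with a negative value (measure_for_overflow() returns 1 - count at line 501) so the counter wraps after a known number of increments, then checks the matching overflow bit in the global status MSR and verifies it can be cleared. A self-contained sketch of only the wrap arithmetic, simulating an N-bit counter in software; the 48-bit width and 100-event budget are arbitrary assumptions for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int width = 48;			/* assume a 48-bit GP counter */
	uint64_t mask = (1ull << width) - 1;
	uint64_t events = 100;				/* arbitrary event budget */
	uint64_t preload = (0 - events) & mask;		/* "-events", truncated to width */
	uint64_t counter = preload;
	int overflowed = 0;
	uint64_t i;

	for (i = 0; i < events; i++) {
		counter = (counter + 1) & mask;
		if (counter == 0)			/* wrapped past 2^width - 1 */
			overflowed = 1;
	}
	printf("preload=0x%llx final=%llu overflowed=%d\n",
	       (unsigned long long)preload, (unsigned long long)counter, overflowed);
	return 0;
}
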
588 uint32_t idx = (uint32_t)cnt->idx | (1u << 31); in do_rdpmc_fast()
593 cnt->count = rdpmc(idx); in do_rdpmc_fast()
613 * Without full-width writes, only the low 32 bits are writable, in check_rdpmc()
614 * and the value is sign-extended. in check_rdpmc()
622 x &= (1ull << pmu.gp_counter_width) - 1; in check_rdpmc()
625 report(rdpmc(i) == x, "cntr-%d", i); in check_rdpmc()
629 report_skip("fast-%d", i); in check_rdpmc()
631 report(cnt.count == (u32)val, "fast-%d", i); in check_rdpmc()
634 uint64_t x = val & ((1ull << pmu.fixed_counter_width) - 1); in check_rdpmc()
641 report(rdpmc(i | (1 << 30)) == x, "fixed cntr-%d", i); in check_rdpmc()
645 report_skip("fixed fast-%d", i); in check_rdpmc()
647 report(cnt.count == (u32)x, "fixed fast-%d", i); in check_rdpmc()
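check_rdpmc() and do_rdpmc_fast() above build the RDPMC index in ECX out of two flag bits: bit 30 selects the fixed-function counter set (the i | (1 << 30) read at line 641) and bit 31 requests the "fast" read that returns only the low 32 bits (the 1u << 31 at line 588). A sketch of just that index encoding; the RDPMC_* macro names are illustrative, not taken from the test.

#include <stdint.h>
#include <stdio.h>

/* RDPMC index bits exercised by the fragments above (names are mine):
 * bit 30 selects the fixed-function counter set, bit 31 asks for the
 * "fast" read that returns only the low 32 bits of the counter. */
#define RDPMC_FIXED_COUNTER	(1u << 30)
#define RDPMC_FAST_MODE		(1u << 31)

static uint32_t rdpmc_index(unsigned int counter, int fixed, int fast)
{
	uint32_t idx = counter;

	if (fixed)
		idx |= RDPMC_FIXED_COUNTER;
	if (fast)
		idx |= RDPMC_FAST_MODE;
	return idx;
}

int main(void)
{
	printf("GP counter 2, fast read:    0x%08x\n", rdpmc_index(2, 0, 1));
	printf("fixed counter 1, full read: 0x%08x\n", rdpmc_index(1, 1, 0));
	return 0;
}
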
681 count = -1; in check_running_counter_wrmsr()
683 count &= (1ull << pmu.gp_counter_width) - 1; in check_running_counter_wrmsr()
702 uint64_t gp_counter_width = (1ull << pmu.gp_counter_width) - 1; in check_emulated_instr()
727 brnch_start = -KVM_FEP_BRANCHES; in check_emulated_instr()
728 instr_start = -KVM_FEP_INSNS; in check_emulated_instr()
745 // Check that the end count - start count is at least the expected in check_emulated_instr()
748 report(instr_cnt.count - instr_start == KVM_FEP_INSNS, in check_emulated_instr()
750 report(brnch_cnt.count - brnch_start == KVM_FEP_BRANCHES, in check_emulated_instr()
753 report(instr_cnt.count - instr_start >= KVM_FEP_INSNS, in check_emulated_instr()
755 report(brnch_cnt.count - brnch_start >= KVM_FEP_BRANCHES, in check_emulated_instr()
778 report_prefix_push("TSX cycles"); in check_tsx_cycles()
797 /* Generate a non-canonical #GP to trigger ABORT. */ in check_tsx_cycles()
803 report(cnt.count > 0, "gp cntr-%d with a value of %" PRId64 "", i, cnt.count); in check_tsx_cycles()
815 * a warm-up state to warm up the cache, it leads to the measured cycles in warm_up()
816 * value may exceed the pre-defined cycles upper boundary and cause in warm_up()
817 * false positive. To avoid this, introduce an warm-up state before in warm_up()
848 u64 val_32 = val_64 & ((1ull << 32) - 1); in check_gp_counters_write_width()
849 u64 val_max_width = val_64 & ((1ull << pmu.gp_counter_width) - 1); in check_gp_counters_write_width()
853 * MSR_IA32_PERFCTRn supports 64-bit writes, in check_gp_counters_write_width()
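The comments at lines 613-614 and the val_32/val_max_width masks in check_gp_counters_write_width() describe the two write behaviours being tested: without full-width writes only the low 32 bits of a GP counter MSR are writable and the value is sign-extended to the counter width, while a full-width write keeps the whole value up to that width. A standalone sketch of the value a counter would hold in each mode, assuming a 48-bit counter for the example.

#include <stdint.h>
#include <stdio.h>

/* Value a GP counter holds after a WRMSR of 'val': a legacy write keeps only
 * the low 32 bits and sign-extends them to the counter width, a full-width
 * write keeps the whole value; both results are truncated to the width. */
static uint64_t counter_after_write(uint64_t val, unsigned int width, int full_width)
{
	uint64_t mask = (width < 64) ? (1ull << width) - 1 : ~0ull;
	uint64_t v = full_width ? val : (uint64_t)(int64_t)(int32_t)val;

	return v & mask;
}

int main(void)
{
	uint64_t val = 0xabcd1234deadbeefull;

	printf("legacy write,     48-bit counter: 0x%012llx\n",
	       (unsigned long long)counter_after_write(val, 48, 0));
	printf("full-width write, 48-bit counter: 0x%012llx\n",
	       (unsigned long long)counter_after_write(val, 48, 1));
	return 0;
}
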
916 * This loop has to run long enough to dominate the VM-exit in set_ref_cycle_expectations()
930 tsc_delta = ((t2 - t1) + (t3 - t0)) / 2; in set_ref_cycle_expectations()
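set_ref_cycle_expectations() brackets the measured loop with two TSC reads on each side and averages the inner and outer deltas (line 930) to estimate the interval the counters actually saw. A generic sketch of the same bracketing and averaging, with rdtsc() replaced by a hypothetical timestamp() stub so the example stays self-contained.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for rdtsc(); any monotonic timestamp source shows the idea. */
static uint64_t timestamp(void)
{
	static uint64_t fake;

	return fake += 1000;	/* deterministic, monotonically increasing */
}

static void workload(void)
{
	/* the measured loop would run here */
}

int main(void)
{
	uint64_t t0, t1, t2, t3, tsc_delta;

	t0 = timestamp();	/* outer start */
	t1 = timestamp();	/* inner start */
	workload();
	t2 = timestamp();	/* inner stop  */
	t3 = timestamp();	/* outer stop  */

	/* Average the inner (t2 - t1) and outer (t3 - t0) windows; the true
	 * interval seen by the counters lies between the two. */
	tsc_delta = ((t2 - t1) + (t3 - t0)) / 2;
	printf("tsc_delta = %llu\n", (unsigned long long)tsc_delta);
	return 0;
}
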
1012 report_prefix_push("full-width writes"); in main()