
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  For licencing details see kernel-base/COPYING

 * register -------------------------------
 *-----------------------------------------
 *-----------------------------------------
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.

        EXTRA_REG_NONE = -1,    /* not used */

        return ((ecode & c->cmask) - c->code) <= (u64)c->size;    /* in constraint_match() */
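/*
 * The single comparison in constraint_match() folds a two-sided range test
 * into one unsigned compare: if the masked event code is below c->code, the
 * subtraction wraps around to a huge u64 and the <= check fails.  Below is a
 * minimal standalone sketch of the same idiom; the struct and names are
 * hypothetical stand-ins, not the kernel's event_constraint.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the code/cmask/size fields of a constraint. */
struct range_constraint {
        uint64_t code;  /* first event code covered by the constraint */
        uint64_t cmask; /* bits of the event code that are compared */
        uint64_t size;  /* how many codes beyond 'code' are still covered */
};

/*
 * Same trick as constraint_match(): one unsigned compare implements
 * code <= (ecode & cmask) <= code + size, because anything below 'code'
 * wraps to a huge value after the subtraction.
 */
static bool range_match(const struct range_constraint *c, uint64_t ecode)
{
        return ((ecode & c->cmask) - c->code) <= c->size;
}

int main(void)
{
        struct range_constraint c = { .code = 0xc0, .cmask = 0xff, .size = 0x3 };

        printf("%d %d %d\n",
               range_match(&c, 0xc0),   /* 1: start of the range */
               range_match(&c, 0x1c3),  /* 1: 0x1c3 & 0xff == 0xc3, end of range */
               range_match(&c, 0xbf));  /* 0: below the range, subtraction wraps */
        return 0;
}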
        return event->hw.flags & PERF_X86_EVENT_TOPDOWN;        /* in is_topdown_count() */
        u64 config = event->attr.config;                        /* in is_metric_event() */
        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;  /* in is_slots_event() */
        return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;     /* in check_leader_group() */
        return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);  /* in is_branch_counters_group() */
        return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);        /* in is_pebs_counter_event_group() */

#define PEBS_COUNTER_MASK       ((1ULL << MAX_PEBS_EVENTS) - 1)
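/*
 * PEBS_COUNTER_MASK uses the usual "(1ULL << n) - 1" construction to set the
 * low MAX_PEBS_EVENTS bits, one bit per PEBS-capable counter.  A tiny sketch
 * of the idiom; the value 8 below is only an illustrative stand-in for
 * MAX_PEBS_EVENTS, whose real value depends on the CPU model.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_PEBS_EVENTS    8       /* assumed value, for illustration only */

/*
 * (1ULL << n) - 1 sets exactly the n lowest bits: shifting 1 up to bit n and
 * subtracting 1 turns bits 0..n-1 on and leaves everything above clear.
 */
#define DEMO_PEBS_COUNTER_MASK  ((1ULL << DEMO_MAX_PEBS_EVENTS) - 1)

int main(void)
{
        printf("mask = 0x%" PRIx64 "\n", (uint64_t)DEMO_PEBS_COUNTER_MASK); /* 0xff */
        return 0;
}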
        raw_spinlock_t  lock;           /* per-core: protect structure */

        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */

        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */

 * manage shared (per-core, per-cpu) registers
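/*
 * The lock/refcnt/core_id fields implement a simple sharing scheme: the
 * hyper-threads of a core find (or allocate) one structure keyed by core_id,
 * bump refcnt, and serialize updates through the lock.  The sketch below
 * shows that allocate-or-attach pattern in user space; a pthread mutex stands
 * in for the raw spinlock, the lookup is a flat array, and the demo itself is
 * single-threaded, so the lookup is not locked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified, hypothetical stand-in for a per-core shared-regs structure. */
struct demo_shared_regs {
        pthread_mutex_t lock;   /* per-core: protect structure */
        int refcnt;             /* per-core: #HT threads attached */
        unsigned core_id;       /* per-core: core id */
};

#define DEMO_MAX_CORES 4
static struct demo_shared_regs *demo_per_core[DEMO_MAX_CORES];

/* First sibling of a core allocates the structure; later siblings attach. */
static struct demo_shared_regs *demo_get_shared_regs(unsigned core_id)
{
        struct demo_shared_regs *regs = demo_per_core[core_id];

        if (!regs) {
                regs = calloc(1, sizeof(*regs));
                if (!regs)
                        return NULL;
                pthread_mutex_init(&regs->lock, NULL);
                regs->core_id = core_id;
                demo_per_core[core_id] = regs;
        }
        regs->refcnt++;
        return regs;
}

/* The last sibling to detach frees the structure. */
static void demo_put_shared_regs(struct demo_shared_regs *regs)
{
        if (--regs->refcnt == 0) {
                demo_per_core[regs->core_id] = NULL;
                pthread_mutex_destroy(&regs->lock);
                free(regs);
        }
}

int main(void)
{
        struct demo_shared_regs *a = demo_get_shared_regs(0);   /* sibling 0 allocates */
        struct demo_shared_regs *b = demo_get_shared_regs(0);   /* sibling 1 attaches */

        if (!a || !b)
                return 1;
        printf("same object: %d, refcnt=%d\n", a == b, a->refcnt);      /* 1, 2 */
        demo_put_shared_regs(b);
        demo_put_shared_regs(a);
        return 0;
}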
        .size = (e) - (c),              \

 * will increase scheduling cycles for an over-committed system
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The any-thread option is supported starting with v3.
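/*
 * Read as a whole, the comment above describes a disqualification mask: a
 * fixed counter can service an event only if none of the listed filter bits
 * are set in its config.  The sketch below illustrates that idea using
 * PERFEVTSEL bit positions from the Intel SDM; the mask definition and all
 * DEMO_* names are assumptions for illustration, not the kernel's
 * FIXED_EVENT_FLAGS.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed PERFEVTSELx bit positions (per the SDM); illustrative only. */
#define DEMO_EVENTSEL_EDGE              (1ULL << 18)
#define DEMO_EVENTSEL_INV               (1ULL << 23)
#define DEMO_EVENTSEL_CMASK             0xff000000ULL
#define DEMO_IN_TX                      (1ULL << 32)
#define DEMO_IN_TX_CHECKPOINTED         (1ULL << 33)

/* Any of these filters disqualifies an event from a fixed counter. */
#define DEMO_FIXED_DISQUALIFY_MASK                              \
        (DEMO_EVENTSEL_EDGE | DEMO_EVENTSEL_INV |               \
         DEMO_EVENTSEL_CMASK | DEMO_IN_TX | DEMO_IN_TX_CHECKPOINTED)

static bool demo_fits_fixed_counter(uint64_t config)
{
        return (config & DEMO_FIXED_DISQUALIFY_MASK) == 0;
}

int main(void)
{
        uint64_t plain = 0x00c0;                        /* event code + umask only */
        uint64_t inverted = plain | DEMO_EVENTSEL_INV;  /* inv filter set */

        printf("%d %d\n", demo_fits_fixed_counter(plain),      /* 1 */
               demo_fits_fixed_counter(inverted));             /* 0 */
        return 0;
}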
 * We define the end marker as having a weight of -1
#define EVENT_CONSTRAINT_END { .weight = -1 }

 * Check for end marker with weight == -1
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
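/*
 * The end-of-table marker plus the "weight != -1" loop is a plain
 * sentinel-terminated-array pattern, so constraint tables never need to
 * carry an explicit length.  A self-contained sketch of the same pattern,
 * with a simplified struct rather than the kernel's event_constraint:
 */
#include <stdio.h>

struct demo_constraint {
        unsigned long long code;
        int weight;     /* -1 terminates the table, like EVENT_CONSTRAINT_END */
};

#define DEMO_CONSTRAINT(c, w)   { .code = (c), .weight = (w) }
#define DEMO_CONSTRAINT_END     { .weight = -1 }

/* Walk the table until the sentinel entry is reached. */
#define demo_for_each_constraint(e, table)      \
        for ((e) = (table); (e)->weight != -1; (e)++)

static const struct demo_constraint demo_table[] = {
        DEMO_CONSTRAINT(0xc0, 2),
        DEMO_CONSTRAINT(0xc2, 1),
        DEMO_CONSTRAINT_END
};

int main(void)
{
        const struct demo_constraint *e;

        demo_for_each_constraint(e, demo_table)
                printf("code=0x%llx weight=%d\n", e->code, e->weight);
        return 0;
}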
        int             idx;            /* per_xxx->regs[] reg index */

#define PERF_PEBS_DATA_SOURCE_MASK      (PERF_PEBS_DATA_SOURCE_MAX - 1)
#define PERF_PEBS_DATA_SOURCE_GRT_MASK  (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)

        __Fp = &hybrid_pmu(_pmu)->_field;       \
        __Fp = &hybrid_pmu(_pmu)->_var;         \
        __Fp = hybrid_pmu(_pmu)->_field;        \
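/*
 * The three "__Fp = ... hybrid_pmu(_pmu)->..." lines are the per-PMU branches
 * of the hybrid accessor macros: when hybrid PMUs are active, a field is
 * resolved through the per-PMU structure instead of the global x86_pmu.  The
 * sketch below collapses that selection into a plain function; every name is
 * hypothetical, and the real macros use typeof()/pointer tricks instead.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_global_pmu {
        unsigned long long config_mask;
        bool hybrid;                    /* stands in for the hybrid capability */
};

struct demo_hybrid_pmu {
        unsigned long long config_mask; /* per-PMU override of the global field */
};

static struct demo_global_pmu demo_x86_pmu = {
        .config_mask = 0xffffULL,
        .hybrid = true,
};
static struct demo_hybrid_pmu demo_big_core = { .config_mask = 0xffffffffULL };

/*
 * Pick the per-PMU field when running hybrid and a per-PMU context exists,
 * otherwise fall back to the global field -- the same decision the hybrid()
 * and hybrid_var() macros make.
 */
static unsigned long long demo_hybrid_config_mask(struct demo_hybrid_pmu *pmu)
{
        if (demo_x86_pmu.hybrid && pmu)
                return pmu->config_mask;        /* hybrid_pmu(_pmu)->_field branch */
        return demo_x86_pmu.config_mask;        /* global x86_pmu._field branch */
}

int main(void)
{
        printf("0x%llx\n", demo_hybrid_config_mask(&demo_big_core));    /* per-PMU value */
        printf("0x%llx\n", demo_hybrid_config_mask(NULL));              /* global fallback */
        return 0;
}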
 * struct x86_pmu - generic x86 pmu

        bool            lbr_pt_coexist;  /* (LBR|BTS) may coexist with PT */

 * Intel host/guest support (KVM)

 * Hybrid support

 * Add padding to guarantee the 64-byte alignment of the state buffer.
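/*
 * The padding comment is about keeping a state buffer aligned to a 64-byte
 * (cache-line-sized) boundary.  Two common ways to get that guarantee in C
 * are sketched below; these are generic idioms, not the kernel's actual
 * structure layout.
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_ALIGN 64

/* Option 1: explicit padding so the buffer starts 64 bytes into the struct. */
struct demo_padded {
        int hdr[3];                             /* 12 bytes of header */
        char pad[DEMO_ALIGN - 3 * sizeof(int)]; /* pad the header out to 64 bytes */
        char state[256];                        /* starts at offset 64 */
};

/* Option 2: let the compiler enforce the member's alignment. */
struct demo_aligned {
        int hdr[3];
        alignas(DEMO_ALIGN) char state[256];
};

int main(void)
{
        printf("padded:  state at offset %zu\n", offsetof(struct demo_padded, state));
        printf("aligned: state at offset %zu\n", offsetof(struct demo_aligned, state));
        return 0;
}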
#define PMU_FL_NO_HT_SHARING    0x1   /* no hyper-threading resource sharing */
#define PMU_FL_INSTR_LATENCY    0x80  /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_RETIRE_LATENCY   0x200 /* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR          0x400 /* Support branch counter logging */

        return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;   /* in task_context_opt() */
        return &((struct x86_perf_task_context *)ctx)->opt;            /* in task_context_opt() */

 * 'not supported', -1 means 'hw_event makes no sense on this CPU'
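/*
 * The fragment above describes the convention used by the generic event
 * tables: an entry of 0 means the generic event is not supported on this
 * model, -1 means it makes no sense there, and any other value is the raw
 * hardware encoding.  A sketch of a lookup honoring those sentinels; the
 * table contents and error values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EVENT_COUNT 4

/* Hypothetical per-model table: index = generic event id, value = raw code. */
static const int64_t demo_event_map[DEMO_EVENT_COUNT] = {
        [0] = 0x003c,   /* e.g. a cycles-style encoding */
        [1] = 0x00c0,   /* e.g. an instructions-style encoding */
        [2] = 0,        /* not supported on this model */
        [3] = -1,       /* makes no sense on this CPU */
};

/* Returns the raw encoding, or a negative errno-style value. */
static int64_t demo_map_generic_event(unsigned int id)
{
        int64_t raw;

        if (id >= DEMO_EVENT_COUNT)
                return -22;     /* EINVAL-style: unknown generic id */
        raw = demo_event_map[id];
        if (raw == 0)
                return -95;     /* EOPNOTSUPP-style: not supported here */
        if (raw == -1)
                return -22;     /* nonsensical on this CPU */
        return raw;
}

int main(void)
{
        for (unsigned int id = 0; id < DEMO_EVENT_COUNT; id++)
                printf("event %u -> %lld\n", id, (long long)demo_map_generic_event(id));
        return 0;
}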
        return hwc->flags & PERF_X86_EVENT_AMD_BRS;     /* in has_amd_brs() */
        return hwc->flags & PERF_X86_EVENT_PAIR;        /* in is_counter_pair() */

        /* in __x86_pmu_enable_event(): */
        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);

        /* in x86_pmu_disable_event(): */
        struct hw_perf_event *hwc = &event->hw;
        wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
        wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
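/*
 * These fragments are the whole enable/disable mechanism: programming a
 * counter is a write of its config word, with the enable bit set, to the
 * event-select MSR (plus the extra_reg MSR and, for paired counters, the
 * partner register at idx + 1); disabling clears the enable bit again.  The
 * sketch below mocks wrmsrl() so the pattern can run in user space.  Bit 22
 * as the enable bit and 0x186 as the first event-select MSR follow the SDM's
 * PERFEVTSEL layout on common Intel parts; everything else is illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_EVENTSEL_BASE      0x186           /* IA32_PERFEVTSEL0 (assumed) */
#define DEMO_EVENTSEL_ENABLE    (1ULL << 22)    /* EN bit in PERFEVTSELx */

/* Mocked wrmsrl(): log what a real driver would write to the MSR. */
static void demo_wrmsrl(uint32_t msr, uint64_t val)
{
        printf("wrmsr 0x%x <- 0x%" PRIx64 "\n", msr, val);
}

static uint32_t demo_config_addr(int idx)
{
        return DEMO_EVENTSEL_BASE + idx;
}

/* Mirrors the __x86_pmu_enable_event() idea: OR in the enable mask. */
static void demo_enable_event(int idx, uint64_t config)
{
        demo_wrmsrl(demo_config_addr(idx), config | DEMO_EVENTSEL_ENABLE);
}

/* Mirrors x86_pmu_disable_event(): write the config back without EN. */
static void demo_disable_event(int idx, uint64_t config)
{
        demo_wrmsrl(demo_config_addr(idx), config & ~DEMO_EVENTSEL_ENABLE);
}

int main(void)
{
        uint64_t config = 0x300c0;      /* illustrative event/umask + USR + OS bits */

        demo_enable_event(0, config);
        demo_disable_event(0, config);
        return 0;
}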
        return event->attr.config & hybrid(event->pmu, config_mask);   /* in x86_pmu_get_event_config() */

 * vm86 mode using the known zero-based code segment and 'fix up' the registers

        /* in set_linear_ip(): */
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
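/*
 * The XOR in set_linear_ip() is a compact flag swap: the branch is only taken
 * when X86_VM_MASK is known to be set (and PERF_EFLAGS_VM known clear), so
 * XORing with both bits clears the first and sets the second in one
 * operation.  A tiny standalone illustration with made-up flag values:
 */
#include <stdio.h>

/* Made-up stand-ins for X86_VM_MASK and PERF_EFLAGS_VM. */
#define DEMO_VM_MASK    (1u << 17)
#define DEMO_PERF_VM    (1u << 5)

int main(void)
{
        unsigned int flags = DEMO_VM_MASK | 0x2;        /* VM set, PERF_VM clear */

        if (flags & DEMO_VM_MASK)
                flags ^= (DEMO_PERF_VM | DEMO_VM_MASK); /* clears VM, sets PERF_VM */

        printf("vm=%d perf_vm=%d\n",
               !!(flags & DEMO_VM_MASK), !!(flags & DEMO_PERF_VM));     /* vm=0 perf_vm=1 */
        return 0;
}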
        /* in amd_pmu_brs_add(): */
        perf_sched_cb_inc(event->pmu);
        cpuc->lbr_users++;

        /* in amd_pmu_brs_del(): */
        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);
        perf_sched_cb_dec(event->pmu);

        return -EOPNOTSUPP;     /* in amd_brs_init() */

        return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);        /* in is_pebs_pt() */

        /* in intel_pmu_has_bts_period(): */
        struct hw_perf_event *hwc = &event->hw;
        if (event->attr.freq)
                return false;
        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;

        /* in intel_pmu_has_bts(): */
        struct hw_perf_event *hwc = &event->hw;
        return intel_pmu_has_bts_period(event, hwc->sample_period);
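/*
 * intel_pmu_has_bts_period() treats an event as a BTS candidate when it is a
 * fixed-period (non-freq) branch-instructions event; the full kernel check
 * also requires the period to be 1 so every branch is recorded.  From user
 * space, an attribute setup of that shape looks roughly like the sketch
 * below.  This is ordinary perf_event_open() usage; whether the driver
 * actually routes the event to BTS depends on the CPU and other attributes.
 */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int demo_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;        /* branch event */
        attr.sample_period = 1;         /* fixed period of 1: sample every branch */
        attr.freq = 0;                  /* not a frequency-based event */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
        attr.exclude_kernel = 1;

        fd = demo_perf_event_open(&attr, 0, -1, -1, 0);         /* this task, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        printf("opened branch-sampling event, fd=%d\n", fd);
        close(fd);
        return 0;
}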