
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *
 *  For licensing details see kernel-base/COPYING
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
	EXTRA_REG_NONE		= -1, /* not used */
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;	/* in constraint_match() */
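/*
 * A minimal userspace sketch (not the kernel code) of the range check used
 * in constraint_match() above: with unsigned arithmetic, an event code below
 * c->code wraps around on subtraction and compares greater than c->size, so
 * a single compare accepts exactly the [code, code + size] range (a range
 * constraint stores its length via .size = (e) - (c), seen further down).
 * The structure below is a stand-in, not struct event_constraint.
 */
#include <assert.h>
#include <stdint.h>

struct range_constraint {
	uint64_t code;		/* first event code in the range */
	uint64_t cmask;		/* config bits that take part in the compare */
	uint64_t size;		/* range length minus one */
};

static int range_matches(const struct range_constraint *c, uint64_t ecode)
{
	return ((ecode & c->cmask) - c->code) <= c->size;
}

int main(void)
{
	struct range_constraint c = { .code = 0xc0, .cmask = 0xff, .size = 0x2 };

	assert(range_matches(&c, 0xc0));	/* start of the range */
	assert(range_matches(&c, 0xc2));	/* end of the range */
	assert(!range_matches(&c, 0xc3));	/* one past the end */
	assert(!range_matches(&c, 0xbf));	/* below: wraps around, rejected */
	return 0;
}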
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;	/* in is_topdown_count() */
	u64 config = event->attr.config;			/* in is_metric_event() */
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;	/* in is_slots_event() */
	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;	/* in is_branch_counters_group() */
#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
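/*
 * Worked example of the mask idiom above: (1ULL << n) - 1 sets the n low
 * bits, one per possible PEBS-capable counter. The value 8 below is an
 * illustrative assumption for MAX_PEBS_EVENTS, not taken from this fragment.
 */
#include <assert.h>
static_assert((1ULL << 8) - 1 == 0xff, "8 low bits -> 0xff");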
	raw_spinlock_t		lock;		/* per-core: protect structure */
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
	 * manage shared (per-core, per-cpu) registers
	.size = (e) - (c),		\
 * will increase scheduling cycles for an over-committed system
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
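/*
 * A self-contained sketch of the check implied by the list above: an event
 * fits a fixed counter only if none of the disqualifying filter bits are set
 * in its config. The bit positions and names below are illustrative
 * assumptions, not the real EVNTSEL layout or kernel identifiers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_FILTER_INV		(1ULL << 23)
#define EX_FILTER_EDGE		(1ULL << 18)
#define EX_FILTER_CMASK		(0xffULL << 24)
#define EX_FILTER_IN_TX		(1ULL << 32)
#define EX_FILTER_IN_TX_CP	(1ULL << 33)

#define EX_FIXED_DISQUALIFYING	(EX_FILTER_INV | EX_FILTER_EDGE |	\
				 EX_FILTER_CMASK | EX_FILTER_IN_TX |	\
				 EX_FILTER_IN_TX_CP)

static bool fits_fixed_counter(uint64_t config)
{
	return !(config & EX_FIXED_DISQUALIFYING);
}

int main(void)
{
	printf("plain event:    %d\n", fits_fixed_counter(0x00c0));
	printf("edge-triggered: %d\n", fits_fixed_counter(0x00c0 | EX_FILTER_EDGE));
	return 0;
}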
 * We define the end marker as having a weight of -1
#define EVENT_CONSTRAINT_END { .weight = -1 }

 * Check for end marker with weight == -1
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
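/*
 * A self-contained userspace re-creation of the sentinel pattern above (an
 * illustrative sketch, not the kernel macros): the table ends with an entry
 * whose weight is -1 and the iterator stops when it reaches it.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_constraint {
	uint64_t code;
	int	 weight;
};

#define EX_CONSTRAINT(c, w)	{ .code = (c), .weight = (w) }
#define EX_CONSTRAINT_END	{ .weight = -1 }
#define ex_for_each_constraint(e, table)	\
	for ((e) = (table); (e)->weight != -1; (e)++)

static const struct ex_constraint example_table[] = {
	EX_CONSTRAINT(0xc0, 2),		/* hypothetical: event 0xc0, usable on 2 counters */
	EX_CONSTRAINT(0x3c, 4),		/* hypothetical: event 0x3c, usable on 4 counters */
	EX_CONSTRAINT_END
};

int main(void)
{
	const struct ex_constraint *e;

	ex_for_each_constraint(e, example_table)
		printf("event %#llx, weight %d\n",
		       (unsigned long long)e->code, e->weight);
	return 0;
}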
	int idx;  /* per_xxx->regs[] reg index */
#define PERF_PEBS_DATA_SOURCE_MASK	(PERF_PEBS_DATA_SOURCE_MAX - 1)
		__Fp = &hybrid_pmu(_pmu)->_field;	\
		__Fp = &hybrid_pmu(_pmu)->_var;		\
		__Fp = hybrid_pmu(_pmu)->_field;	\
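/*
 * The three lines above are the per-hybrid-PMU override paths of the
 * hybrid()/hybrid_var()/hybrid_bit() style accessors. A minimal GNU C
 * sketch (gcc/clang) of that pattern: a statement expression yields a
 * pointer to either the global descriptor's field or the hybrid PMU's copy,
 * and typeof keeps it type-generic. All names below are illustrative
 * stand-ins, not the kernel's structures.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ex_pmu_caps { int num_counters; bool pebs; };

static struct ex_pmu_caps ex_global_caps = { .num_counters = 8, .pebs = true };
static struct ex_pmu_caps ex_atom_caps   = { .num_counters = 6, .pebs = true };

static bool ex_is_hybrid = true;

#define ex_hybrid(_pmu, _field)						\
(*({									\
	typeof(&ex_global_caps._field) __fp = &ex_global_caps._field;	\
									\
	if (ex_is_hybrid && (_pmu))					\
		__fp = &(_pmu)->_field;					\
									\
	__fp;								\
}))

int main(void)
{
	struct ex_pmu_caps *none = NULL;

	printf("global: %d counters\n", ex_hybrid(none, num_counters));
	printf("atom:   %d counters\n", ex_hybrid(&ex_atom_caps, num_counters));
	return 0;
}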
 * struct x86_pmu - generic x86 pmu
	bool		lbr_pt_coexist;	/* (LBR|BTS) may coexist with PT */
	 * Intel host/guest support (KVM)
	 * Hybrid support
	 * Add padding to guarantee the 64-byte alignment of the state buffer.
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_INSTR_LATENCY	0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_RETIRE_LATENCY	0x200 /* Support Retire Latency in PEBS */
#define PMU_FL_BR_CNTR		0x400 /* Support branch counter logging */
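/*
 * The PMU_FL_* values above are single-bit capability flags OR-ed into one
 * flags word and tested with a bitwise AND. A self-contained sketch of that
 * usage; the local flags variable stands in for the driver's flags field,
 * and only the numeric values come from the defines above.
 */
#include <stdio.h>

#define EX_PMU_FL_INSTR_LATENCY		0x80
#define EX_PMU_FL_RETIRE_LATENCY	0x200
#define EX_PMU_FL_BR_CNTR		0x400

int main(void)
{
	unsigned int flags = EX_PMU_FL_INSTR_LATENCY | EX_PMU_FL_BR_CNTR;

	if (flags & EX_PMU_FL_BR_CNTR)
		printf("branch counter logging supported\n");
	if (!(flags & EX_PMU_FL_RETIRE_LATENCY))
		printf("retire latency not reported in PEBS\n");
	return 0;
}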
		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;	/* in task_context_opt() */
	return &((struct x86_perf_task_context *)ctx)->opt;
 * 'not supported', -1 means 'hw_event makes no sense on
	return hwc->flags & PERF_X86_EVENT_AMD_BRS;	/* in has_amd_brs() */
	return hwc->flags & PERF_X86_EVENT_PAIR;	/* in is_counter_pair() */
	if (hwc->extra_reg.reg)					/* in __x86_pmu_enable_event() */
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
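/*
 * The last wrmsrl() above composes the event-select value as
 * (config | enable_mask) & ~disable_mask: set the requested enable bit(s),
 * then clear whatever the per-cpu virtualization mask keeps disabled. A
 * worked example with illustrative values (not taken from the header):
 *
 *   config       = 0x000100c0   (event 0xc0, USR bit)
 *   enable_mask  = 0x00400000   (the counter-enable bit)
 *   disable_mask = 0x00100000   (say, an interrupt bit masked for a guest)
 *
 *   (0x000100c0 | 0x00400000) & ~0x00100000 == 0x004100c0
 */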
	struct hw_perf_event *hwc = &event->hw;		/* in x86_pmu_disable_event() */
	wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;	/* in set_linear_ip() */
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
	perf_sched_cb_inc(event->pmu);		/* in amd_pmu_brs_add() */
	cpuc->lbr_users++;
	cpuc->lbr_users--;			/* in amd_pmu_brs_del() */
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->pmu);
	return -EOPNOTSUPP;	/* in amd_brs_init() */
	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);	/* in is_pebs_pt() */
	struct hw_perf_event *hwc = &event->hw;		/* in intel_pmu_has_bts_period() */
	if (event->attr.freq)
	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	struct hw_perf_event *hwc = &event->hw;		/* in intel_pmu_has_bts() */
	return intel_pmu_has_bts_period(event, hwc->sample_period);
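/*
 * What the two helpers above look for, seen from userspace: a fixed-period
 * (attr.freq == 0) branch-instructions event with a sample period of 1 is
 * the configuration the Intel driver treats as a BTS request. A sketch of
 * such an attribute; it only illustrates the condition and does not call
 * the kernel helpers. The function name is hypothetical.
 */
#include <string.h>
#include <linux/perf_event.h>

static void ex_fill_bts_like_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr->sample_period = 1;	/* period == 1; attr->freq stays 0 */
}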