Lines Matching full:pmu

77 	struct pmu *pmu; /* for custom pmu ops */  member
79 * Uncore PMU would store relevant platform topology configuration here
110 struct pmu pmu; member
140 struct intel_uncore_pmu *pmu; member
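The three pmu members above sit at different levels of the driver's object model; the names line up with the Linux Intel uncore PMU header (arch/x86/events/intel/uncore.h). A minimal userspace sketch of how they nest, with field sets reduced to what this listing itself references (everything beyond those names is assumed for illustration; the listing does not show which struct owns line 77, placing it in the per-type description is an assumption):

    /* Sketch of the nesting implied by source lines 77, 110 and 140.
     * Only fields referenced by the listing are kept; "struct pmu"
     * stands in for the real perf-core type. */
    struct pmu { int dummy; };

    struct intel_uncore_type {
        const char *name;               /* shared per-hardware-block data */
        struct pmu *pmu;                /* custom pmu ops (line 77, owner assumed) */
    };

    struct intel_uncore_pmu {
        struct pmu pmu;                 /* embedded generic PMU (line 110) */
        int pmu_idx;                    /* instance index used in the offset math */
        struct intel_uncore_type *type;
    };

    struct intel_uncore_box {
        struct intel_uncore_pmu *pmu;   /* back-pointer (line 140) */
    };

Every accessor in the rest of the listing walks this chain as box->pmu->type.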
187 return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); in dev_to_uncore_pmu()
227 if (offset < box->pmu->type->mmio_map_size) in uncore_mmio_is_valid_offset()
231 offset, box->pmu->type->name); in uncore_mmio_is_valid_offset()
239 return box->pmu->type->box_ctl + in uncore_mmio_box_ctl()
240 box->pmu->type->mmio_offset * box->pmu->pmu_idx; in uncore_mmio_box_ctl()
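Source lines 227-240 contain the MMIO address math: an offset is only considered valid if it falls inside the type's mapped window (mmio_map_size), and each instance ("box") of a type places its control register mmio_offset bytes past the previous one. A runnable model of both rules, with invented values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of source lines 227-240; every numeric value is invented. */
    struct mmio_type_model {
        unsigned int box_ctl;       /* control offset of instance 0 */
        unsigned int mmio_offset;   /* stride between instances */
        unsigned int mmio_map_size; /* size of the mapped window */
    };

    static bool is_valid_offset(const struct mmio_type_model *t, unsigned int off)
    {
        return off < t->mmio_map_size;  /* the check at line 227 */
    }

    int main(void)
    {
        struct mmio_type_model t = { 0x2900, 0x1000, 0x4000 };

        for (int pmu_idx = 0; pmu_idx < 3; pmu_idx++) {
            unsigned int ctl = t.box_ctl + t.mmio_offset * pmu_idx;
            printf("box %d: ctl 0x%x (%s)\n", pmu_idx, ctl,
                   is_valid_offset(&t, ctl) ? "in window" : "out of range");
        }
        return 0;
    }

Box 2 lands at 0x4900, past the 0x4000-byte window, which is exactly the situation the validity check at line 227 rejects.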
245 return box->pmu->type->box_ctl; in uncore_pci_box_ctl()
250 return box->pmu->type->fixed_ctl; in uncore_pci_fixed_ctl()
255 return box->pmu->type->fixed_ctr; in uncore_pci_fixed_ctr()
262 return idx * 8 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
264 return idx * 4 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
270 return idx * 8 + box->pmu->type->perf_ctr; in uncore_pci_perf_ctr()
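Source lines 262-270 give the PCI config-space strides: counters sit 8 bytes apart, while event-control registers sit 8 or 4 bytes apart depending on a branch the listing truncates (in the kernel header that branch is on type->pair_ctr_ctl, i.e. whether control and counter registers are laid out as adjacent pairs). A model with invented base offsets:

    #include <stdio.h>

    /* Model of source lines 262-270.  pair_ctr_ctl names the kernel
     * header's branch condition; the base offsets are invented. */
    struct pci_type_model {
        unsigned int event_ctl;  /* config offset of ctl register 0 */
        unsigned int perf_ctr;   /* config offset of counter 0 */
        int pair_ctr_ctl;        /* ctl/ctr laid out as pairs? */
    };

    static unsigned int pci_event_ctl(const struct pci_type_model *t, int idx)
    {
        /* paired layout: 8-byte stride; otherwise ctl registers pack at 4 */
        return (t->pair_ctr_ctl ? idx * 8 : idx * 4) + t->event_ctl;
    }

    static unsigned int pci_perf_ctr(const struct pci_type_model *t, int idx)
    {
        return idx * 8 + t->perf_ctr;  /* counters always use an 8-byte stride */
    }

    int main(void)
    {
        struct pci_type_model t = { 0xd8, 0xa0, 0 };

        printf("ctl1 at 0x%x, ctr1 at 0x%x\n",
               pci_event_ctl(&t, 1), pci_perf_ctr(&t, 1));
        return 0;
    }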
275 struct intel_uncore_pmu *pmu = box->pmu; in uncore_msr_box_offset() local
276 return pmu->type->msr_offsets ? in uncore_msr_box_offset()
277 pmu->type->msr_offsets[pmu->pmu_idx] : in uncore_msr_box_offset()
278 pmu->type->msr_offset * pmu->pmu_idx; in uncore_msr_box_offset()
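Source lines 275-278 are the heart of the MSR layout: a type either carries an explicit per-instance offset table (msr_offsets) for irregular layouts, or a single linear stride (msr_offset). Lines 283-297 then add this offset to the type's box_ctl / fixed_ctl / fixed_ctr base MSRs, returning 0 when the type has no such register. A runnable model of the selection, with invented values:

    #include <stdio.h>

    /* Model of source lines 275-278; table contents and stride invented. */
    struct msr_type_model {
        const unsigned int *msr_offsets; /* optional per-instance table */
        unsigned int msr_offset;         /* linear stride fallback */
    };

    static unsigned int msr_box_offset(const struct msr_type_model *t, int pmu_idx)
    {
        return t->msr_offsets ? t->msr_offsets[pmu_idx]
                              : t->msr_offset * pmu_idx;
    }

    int main(void)
    {
        static const unsigned int irregular[] = { 0x0, 0xa, 0x18 };
        struct msr_type_model linear = { NULL, 0x10 };
        struct msr_type_model tabled = { irregular, 0 };

        printf("linear box 2: 0x%x\n", msr_box_offset(&linear, 2)); /* 0x20 */
        printf("tabled box 2: 0x%x\n", msr_box_offset(&tabled, 2)); /* 0x18 */
        return 0;
    }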
283 if (!box->pmu->type->box_ctl) in uncore_msr_box_ctl()
285 return box->pmu->type->box_ctl + uncore_msr_box_offset(box); in uncore_msr_box_ctl()
290 if (!box->pmu->type->fixed_ctl) in uncore_msr_fixed_ctl()
292 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); in uncore_msr_fixed_ctl()
297 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); in uncore_msr_fixed_ctr()
347 struct intel_uncore_pmu *pmu = box->pmu; in uncore_freerunning_counter() local
349 return pmu->type->freerunning[type].counter_base + in uncore_freerunning_counter()
350 pmu->type->freerunning[type].counter_offset * idx + in uncore_freerunning_counter()
351 (pmu->type->freerunning[type].box_offsets ? in uncore_freerunning_counter()
352 pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : in uncore_freerunning_counter()
353 pmu->type->freerunning[type].box_offset * pmu->pmu_idx); in uncore_freerunning_counter()
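Source lines 347-353 apply the same idea to free-running counters, with one extra dimension: each entry in the per-type freerunning[] array carries a counter base, a per-counter stride, and either a per-box offset table or a linear per-box stride. A model (names mirror the listing, values invented):

    #include <stdio.h>

    /* Model of source lines 347-353; all values invented. */
    struct freerunning_model {
        unsigned int counter_base;       /* MSR of counter 0 in box 0 */
        unsigned int counter_offset;     /* stride between counters */
        const unsigned int *box_offsets; /* optional per-box table */
        unsigned int box_offset;         /* linear per-box stride */
    };

    static unsigned int freerunning_msr(const struct freerunning_model *fr,
                                        int idx, int pmu_idx)
    {
        return fr->counter_base +
               fr->counter_offset * idx +
               (fr->box_offsets ? fr->box_offsets[pmu_idx]
                                : fr->box_offset * pmu_idx);
    }

    int main(void)
    {
        struct freerunning_model fr = { 0x800, 0x8, NULL, 0x100 };

        /* counter 2 in box 1: 0x800 + 0x8*2 + 0x100*1 = 0x910 */
        printf("0x%x\n", freerunning_msr(&fr, 2, 1));
        return 0;
    }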
361 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_event_ctl()
363 return box->pmu->type->event_ctl + in uncore_msr_event_ctl()
364 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_event_ctl()
374 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_perf_ctr()
376 return box->pmu->type->perf_ctr + in uncore_msr_perf_ctr()
377 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_perf_ctr()
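Source lines 361-377 show the paired-MSR layout for ordinary counters: when pair_ctr_ctl is set, control and counter MSRs interleave (ctl0, ctr0, ctl1, ctr1, ...), so event index idx lands 2*idx MSRs past the base instead of idx. The trailing '+' on lines 364 and 377 presumably continues onto the per-box offset from lines 275-278, which the match listing truncates. A sketch of the stride arithmetic:

    /* Sketch of source lines 361-377; the box_off parameter stands in
     * for the truncated continuation (assumed to be the per-box MSR
     * offset). */
    static unsigned int msr_event_ctl(unsigned int event_ctl, int pair_ctr_ctl,
                                      int idx, unsigned int box_off)
    {
        return event_ctl + (pair_ctr_ctl ? 2 * idx : idx) + box_off;
    }

    static unsigned int msr_perf_ctr(unsigned int perf_ctr, int pair_ctr_ctl,
                                     int idx, unsigned int box_off)
    {
        return perf_ctr + (pair_ctr_ctl ? 2 * idx : idx) + box_off;
    }

With a paired layout and bases ctl = 0x700, ctr = 0x701, event 1 uses MSRs 0x702 and 0x703: each ctl/ctr pair occupies two consecutive MSR numbers.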
420 return box->pmu->type->perf_ctr_bits; in uncore_perf_ctr_bits()
425 return box->pmu->type->fixed_ctr_bits; in uncore_fixed_ctr_bits()
434 return box->pmu->type->freerunning[type].bits; in uncore_freerunning_bits()
442 return box->pmu->type->freerunning[type].num_counters; in uncore_num_freerunning()
448 return box->pmu->type->num_freerunning_types; in uncore_num_freerunning_types()
463 return box->pmu->type->num_counters; in uncore_num_counters()
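Source lines 420-463 expose counter widths and counts. The widths matter because raw reads narrower than 64 bits must be widened before computing deltas, or a counter wrap produces a huge bogus value. A common perf idiom, shown here as a sketch rather than the driver's actual update path, is to shift both samples up so the hardware's top bit lands at bit 63:

    #include <stdint.h>
    #include <stdio.h>

    /* Widening a narrow counter delta to 64 bits using the width that
     * perf_ctr_bits (line 420) reports.  Sketch of the usual perf
     * counter-update idiom, not the driver's code. */
    static uint64_t counter_delta(uint64_t prev, uint64_t now, int bits)
    {
        int shift = 64 - bits;

        /* after the shifts, a wrap in the narrow counter becomes an
         * ordinary unsigned wrap in the 64-bit subtraction */
        return ((now << shift) - (prev << shift)) >> shift;
    }

    int main(void)
    {
        /* a 48-bit counter that wrapped: prev near the top, now small */
        uint64_t prev = 0xffffffffff00ULL, now = 0x100ULL;

        printf("delta = 0x%llx\n",
               (unsigned long long)counter_delta(prev, now, 48));  /* 0x200 */
        return 0;
    }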
487 box->pmu->type->ops->disable_event(box, event); in uncore_disable_event()
493 box->pmu->type->ops->enable_event(box, event); in uncore_enable_event()
499 return box->pmu->type->ops->read_counter(box, event); in uncore_read_counter()
505 if (box->pmu->type->ops->init_box) in uncore_box_init()
506 box->pmu->type->ops->init_box(box); in uncore_box_init()
513 if (box->pmu->type->ops->exit_box) in uncore_box_exit()
514 box->pmu->type->ops->exit_box(box); in uncore_box_exit()
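Source lines 487-514 dispatch through a per-type ops table. The listing itself shows the contract: enable_event, disable_event and read_counter are invoked unconditionally, while init_box and exit_box are NULL-checked first, i.e. optional. A reduced model of that table and the guarded dispatch (field set inferred from the calls visible above):

    #include <stdint.h>

    struct box;     /* opaque stand-ins for the real structures */
    struct event;

    /* Reduced model of the callback table behind source lines 487-514. */
    struct uncore_ops_model {
        void     (*init_box)(struct box *);     /* optional */
        void     (*exit_box)(struct box *);     /* optional */
        void     (*enable_event)(struct box *, struct event *);  /* required */
        void     (*disable_event)(struct box *, struct event *); /* required */
        uint64_t (*read_counter)(struct box *, struct event *);  /* required */
    };

    static void box_init(struct box *b, const struct uncore_ops_model *ops)
    {
        if (ops->init_box)      /* guarded, as at source line 505 */
            ops->init_box(b);
    }

    static void box_exit(struct box *b, const struct uncore_ops_model *ops)
    {
        if (ops->exit_box)      /* guarded, as at source line 513 */
            ops->exit_box(b);
    }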
525 return container_of(event->pmu, struct intel_uncore_pmu, pmu); in uncore_event_to_pmu()
533 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
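Source lines 187 and 525 both rely on the container_of() pattern: perf core hands back the embedded struct pmu, and the driver recovers the enclosing intel_uncore_pmu by subtracting the member's offset. A self-contained userspace demo of the same pointer arithmetic (struct contents invented):

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace equivalent of the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pmu { int dummy; };      /* stand-in for the perf-core type */

    struct uncore_pmu_model {
        int pmu_idx;
        struct pmu pmu;             /* embedded, as at source line 110 */
    };

    int main(void)
    {
        struct uncore_pmu_model u = { .pmu_idx = 3 };
        struct pmu *p = &u.pmu;     /* what perf core sees and passes back */

        struct uncore_pmu_model *back =
            container_of(p, struct uncore_pmu_model, pmu);

        printf("recovered pmu_idx = %d\n", back->pmu_idx);  /* prints 3 */
        return 0;
    }

The declaration at line 533 goes the other direction: given an uncore PMU and a CPU, uncore_pmu_to_box() looks up the box instance associated with that CPU.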