Lines matching the full word "pmu"

The fragments below appear to come from the Linux kernel's AMD uncore performance-counter driver (arch/x86/events/amd/uncore.c); the number leading each match is its line number in that file.

55 	struct pmu pmu;
96 return container_of(event->pmu, struct amd_uncore_pmu, pmu);
112 event->pmu->read(event);
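The return statement at line 96 and the call sites at lines 165 onward belong to one accessor; reassembled it looks like the sketch below (only the static inline qualifiers are assumed, the rest is taken from the listing). It recovers the driver-private wrapper from the generic struct pmu embedded in it at line 55.

	static inline struct amd_uncore_pmu *
	event_to_amd_uncore_pmu(struct perf_event *event)
	{
		return container_of(event->pmu, struct amd_uncore_pmu, pmu);
	}

This embed-and-container_of pattern is what lets the perf core hand the driver a plain struct pmu * while the driver keeps its own per-PMU state alongside it.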
165 struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
166 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
183 struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
184 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
191 event->pmu->read(event);
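Lines 165-191 are the start/stop pair. A hedged sketch of what such handlers conventionally do follows; the wrmsrl() programming and the ARCH_PERFMON_EVENTSEL_ENABLE bit are assumptions modeled on other x86 perf drivers, while the ->read() call on PERF_EF_UPDATE comes straight from line 191.

	static void amd_uncore_start_sketch(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;

		/* Reload the hardware counter before enabling it. */
		if (flags & PERF_EF_RELOAD)
			wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

		hwc->state = 0;
		wrmsrl(hwc->config_base, hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
	}

	static void amd_uncore_stop_sketch(struct perf_event *event, int flags)
	{
		struct hw_perf_event *hwc = &event->hw;

		/* Clear the enable bit, then fold in the final count. */
		wrmsrl(hwc->config_base, hwc->config);
		hwc->state |= PERF_HES_STOPPED;

		if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
			event->pmu->read(event);	/* line 191 */
			hwc->state |= PERF_HES_UPTODATE;
		}
	}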
204 struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
205 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
212 for (i = 0; i < pmu->num_counters; i++) {
221 for (i = 0; i < pmu->num_counters; i++) {
234 hwc->config_base = pmu->msr_base + (2 * hwc->idx);
235 hwc->event_base = pmu->msr_base + 1 + (2 * hwc->idx);
236 hwc->event_base_rdpmc = pmu->rdpmc_base + hwc->idx;
239 if (pmu->rdpmc_base < 0)
243 event->pmu->start(event, PERF_EF_RELOAD);
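Lines 204-243 are the ->add() handler: two scans over the counter slots (lines 212 and 221), then the MSR bases for the claimed slot (lines 234-236), then a conditional start (line 243). The sketch below is hedged: the ctx->events slot array and its cmpxchg-based claiming are assumptions modeled on common x86 uncore drivers, and the body of line 239's guard for PMUs without RDPMC is not shown in the listing, so it is omitted.

	static int amd_uncore_add_sketch(struct perf_event *event, int flags)
	{
		struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
		struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
		struct hw_perf_event *hwc = &event->hw;
		int i;

		/* Reuse the slot if this event already owns one (cf. line 212). */
		for (i = 0; i < pmu->num_counters; i++) {
			if (ctx->events[i] == event) {
				hwc->idx = i;
				goto out;
			}
		}

		/* Otherwise atomically claim a free slot (cf. line 221). */
		hwc->idx = -1;
		for (i = 0; i < pmu->num_counters; i++) {
			if (!cmpxchg(&ctx->events[i], NULL, event)) {
				hwc->idx = i;
				break;
			}
		}
		if (hwc->idx < 0)
			return -EBUSY;

	out:
		/* Interleaved CTL/CTR MSR pair for the slot (lines 234-236). */
		hwc->config_base = pmu->msr_base + (2 * hwc->idx);
		hwc->event_base  = pmu->msr_base + 1 + (2 * hwc->idx);
		hwc->event_base_rdpmc = pmu->rdpmc_base + hwc->idx;

		if (flags & PERF_EF_START)
			event->pmu->start(event, PERF_EF_RELOAD);	/* line 243 */
		return 0;
	}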
251 struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
252 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
255 event->pmu->stop(event, PERF_EF_UPDATE);
257 for (i = 0; i < pmu->num_counters; i++) {
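The matching ->del() handler at lines 251-257 is the mirror image: stop with a final update, then release whichever slot the event held. A sketch, with the cmpxchg release assumed as the counterpart of the claim above:

	static void amd_uncore_del_sketch(struct perf_event *event, int flags)
	{
		struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
		struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
		struct hw_perf_event *hwc = &event->hw;
		int i;

		event->pmu->stop(event, PERF_EF_UPDATE);	/* line 255 */

		/* Release the slot this event occupied (cf. line 257). */
		for (i = 0; i < pmu->num_counters; i++) {
			if (cmpxchg(&ctx->events[i], event, NULL) == event)
				break;
		}
		hwc->idx = -1;
	}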
269 struct amd_uncore_pmu *pmu;
273 if (event->attr.type != event->pmu->type)
279 pmu = event_to_amd_uncore_pmu(event);
280 ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
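Lines 273-280 are the conventional event_init gate: refuse events opened against a different PMU type, then make sure this CPU actually has a context. A sketch; the errno choices are the customary ones for these cases, assumed rather than copied:

	/* Only accept events opened against this PMU's dynamic type. */
	if (event->attr.type != event->pmu->type)	/* line 273 */
		return -ENOENT;

	pmu = event_to_amd_uncore_pmu(event);		/* line 279 */
	ctx = *per_cpu_ptr(pmu->ctx, event->cpu);	/* line 280 */
	if (!ctx)
		return -ENODEV;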
321 struct pmu *ptr = dev_get_drvdata(dev);
322 struct amd_uncore_pmu *pmu = container_of(ptr, struct amd_uncore_pmu, pmu);
324 return cpumap_print_to_pagebuf(true, buf, &pmu->active_mask);
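Lines 321-324 reassemble into the usual read-only cpumask attribute that tells tooling which CPU counts for each shared PMU; only the DEVICE_ATTR_RO wiring below is assumed, the body is from the listing:

	static ssize_t cpumask_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct pmu *ptr = dev_get_drvdata(dev);		/* line 321 */
		struct amd_uncore_pmu *pmu =
			container_of(ptr, struct amd_uncore_pmu, pmu);

		return cpumap_print_to_pagebuf(true, buf, &pmu->active_mask);
	}
	static DEVICE_ATTR_RO(cpumask);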
474 struct amd_uncore_pmu *pmu;
482 pmu = &uncore->pmus[i];
483 ctx = *per_cpu_ptr(pmu->ctx, cpu);
488 cpumask_clear_cpu(cpu, &pmu->active_mask);
495 *per_cpu_ptr(pmu->ctx, cpu) = NULL;
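Lines 482-495 drop a departing CPU's reference on the shared context. A hedged sketch; the refcnt field is an assumption, and the per-context events array hinted at by line 544 would be freed here as well (elided for brevity):

	ctx = *per_cpu_ptr(pmu->ctx, cpu);	/* line 483 */
	if (ctx) {
		cpumask_clear_cpu(cpu, &pmu->active_mask);	/* line 488 */
		if (--ctx->refcnt == 0)
			kfree(ctx);
		*per_cpu_ptr(pmu->ctx, cpu) = NULL;		/* line 495 */
	}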
502 struct amd_uncore_pmu *pmu;
512 pmu = &uncore->pmus[i];
513 *per_cpu_ptr(pmu->ctx, cpu) = NULL;
517 if (gid != pmu->group)
525 prev = *per_cpu_ptr(pmu->ctx, j);
544 pmu->num_counters,
554 cpumask_set_cpu(cpu, &pmu->active_mask);
558 *per_cpu_ptr(pmu->ctx, cpu) = curr;
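Lines 512-558 implement find-or-allocate sharing of one context per uncore group. A hedged sketch as a function; the refcnt field, the kzalloc_node() call, and the comparison against the sibling CPU's group id are assumptions inferred from the line ordering (525 reads the sibling pointer, 544 sizes an allocation by pmu->num_counters, 554/558 publish the result):

	static int amd_uncore_ctx_init_sketch(struct amd_uncore *uncore,
					      struct amd_uncore_pmu *pmu,
					      unsigned int cpu, int gid)
	{
		struct amd_uncore_ctx *curr = NULL, *prev;
		unsigned int j;

		/* Reuse the context of any online sibling in the same group. */
		for_each_online_cpu(j) {
			prev = *per_cpu_ptr(pmu->ctx, j);	/* line 525 */
			if (prev && gid == amd_uncore_ctx_gid(uncore, j)) {
				curr = prev;
				break;
			}
		}

		/* The first CPU of a group allocates and becomes its reader. */
		if (!curr) {
			curr = kzalloc_node(sizeof(*curr), GFP_KERNEL,
					    cpu_to_node(cpu));
			if (!curr)
				return -ENOMEM;
			cpumask_set_cpu(cpu, &pmu->active_mask); /* line 554 */
		}

		curr->refcnt++;
		*per_cpu_ptr(pmu->ctx, cpu) = curr;	/* line 558 */
		return 0;
	}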
572 struct amd_uncore_pmu *pmu;
579 pmu = &uncore->pmus[i];
580 curr = *per_cpu_ptr(pmu->ctx, cpu);
586 next = *per_cpu_ptr(pmu->ctx, j);
591 perf_pmu_migrate_context(&pmu->pmu, cpu, j);
592 cpumask_clear_cpu(cpu, &pmu->active_mask);
593 cpumask_set_cpu(j, &pmu->active_mask);
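Lines 579-593 keep counting across hotplug: when the reader CPU goes offline, its perf context is migrated to another online CPU that shares the same uncore context. A hedged fragment; the equality test against the shared pointer is inferred from lines 580 and 586:

	curr = *per_cpu_ptr(pmu->ctx, cpu);		/* line 580 */
	if (!curr || !cpumask_test_cpu(cpu, &pmu->active_mask))
		return;		/* this CPU was not the group's reader */

	for_each_online_cpu(j) {
		if (j == cpu)
			continue;
		next = *per_cpu_ptr(pmu->ctx, j);	/* line 586 */
		if (next == curr) {
			/* Hand the events over without losing counts. */
			perf_pmu_migrate_context(&pmu->pmu, cpu, j);	/* line 591 */
			cpumask_clear_cpu(cpu, &pmu->active_mask);
			cpumask_set_cpu(j, &pmu->active_mask);
			break;
		}
	}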
720 struct amd_uncore_pmu *pmu;
741 pmu = &uncore->pmus[0];
742 strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_df" : "amd_nb",
743 sizeof(pmu->name));
744 pmu->num_counters = num_counters;
745 pmu->msr_base = MSR_F15H_NB_PERF_CTL;
746 pmu->rdpmc_base = RDPMC_BASE_NB;
747 pmu->group = amd_uncore_ctx_gid(uncore, cpu);
756 pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
757 if (!pmu->ctx)
760 pmu->pmu = (struct pmu) {
763 .name = pmu->name,
774 if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) {
775 free_percpu(pmu->ctx);
776 pmu->ctx = NULL;
780 pr_info("%d %s%s counters detected\n", pmu->num_counters,
782 pmu->pmu.name);
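Lines 756-776 (and their twins at 891-911 and 1057-1077) follow the standard dynamic-PMU registration sequence: allocate the per-CPU context pointer, fill in struct pmu, then register with type -1 to get a dynamically assigned id. A sketch; the amd_uncore_* callback names and the task_ctx_nr choice are assumptions based on the usual struct pmu interface, not copied from the listing:

	pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);	/* line 756 */
	if (!pmu->ctx)
		return -ENOMEM;

	pmu->pmu = (struct pmu) {				/* line 760 */
		.task_ctx_nr	= perf_invalid_context,	/* uncore: no task context */
		.name		= pmu->name,		/* line 763 */
		.event_init	= amd_uncore_event_init,
		.add		= amd_uncore_add,
		.del		= amd_uncore_del,
		.start		= amd_uncore_start,
		.stop		= amd_uncore_stop,
		.read		= amd_uncore_read,
	};

	/* -1 asks the perf core for a dynamically allocated PMU type id. */
	if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) {	/* line 774 */
		free_percpu(pmu->ctx);
		pmu->ctx = NULL;
		return -ENODEV;
	}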
854 struct amd_uncore_pmu *pmu;
875 pmu = &uncore->pmus[0];
876 strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_l3" : "amd_l2",
877 sizeof(pmu->name));
878 pmu->num_counters = num_counters;
879 pmu->msr_base = MSR_F16H_L2I_PERF_CTL;
880 pmu->rdpmc_base = RDPMC_BASE_LLC;
881 pmu->group = amd_uncore_ctx_gid(uncore, cpu);
891 pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
892 if (!pmu->ctx)
895 pmu->pmu = (struct pmu) {
899 .name = pmu->name,
910 if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) {
911 free_percpu(pmu->ctx);
912 pmu->ctx = NULL;
916 pr_info("%d %s%s counters detected\n", pmu->num_counters,
918 pmu->pmu.name);
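The L2/L3 block at lines 875-918 mirrors the DF registration above; only the name (amd_l3 on family 17h and later, amd_l2 before, line 876), the MSR base (MSR_F16H_L2I_PERF_CTL), and the counter count differ. Once perf_pmu_register() succeeds, the PMU appears under /sys/bus/event_source/devices/<name>, so a usage sketch from userspace would be something like perf stat -a -e amd_l3/config=.../ (the config encoding is family-specific and deliberately left unspecified here).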
943 struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
944 struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
1017 struct amd_uncore_pmu *pmu;
1050 pmu = &uncore->pmus[index];
1051 snprintf(pmu->name, sizeof(pmu->name), "amd_umc_%hu", index);
1052 pmu->num_counters = group_num_pmcs[gid] / group_num_pmus[gid];
1053 pmu->msr_base = MSR_F19H_UMC_PERF_CTL + i * pmu->num_counters * 2;
1054 pmu->rdpmc_base = -1;
1055 pmu->group = gid;
1057 pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
1058 if (!pmu->ctx)
1061 pmu->pmu = (struct pmu) {
1064 .name = pmu->name,
1075 if (perf_pmu_register(&pmu->pmu, pmu->pmu.name, -1)) {
1076 free_percpu(pmu->ctx);
1077 pmu->ctx = NULL;
1081 pr_info("%d %s counters detected\n", pmu->num_counters,
1082 pmu->pmu.name);
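Lines 1052-1053 split a group's counters evenly across its UMC PMU instances and bank their MSRs contiguously. A hypothetical helper making the rule explicit, with a worked example: if group_num_pmcs[gid] = 8 and group_num_pmus[gid] = 2, each instance owns 4 CTL/CTR pairs, so instance 1's bank begins 8 MSRs above instance 0's (the array names come from line 1052; the helper itself is not in the driver):

	/* Hypothetical helper restating the banking rule of line 1053. */
	static u32 umc_msr_base(unsigned int instance, unsigned int num_counters)
	{
		/* Each instance owns num_counters interleaved CTL/CTR pairs. */
		return MSR_F19H_UMC_PERF_CTL + instance * num_counters * 2;
	}

Note rdpmc_base = -1 at line 1054: UMC counters cannot be read with RDPMC, which is exactly the case the guard at line 239 exists for.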
1193 struct amd_uncore_pmu *pmu;
1209 pmu = &uncore->pmus[j];
1210 if (!pmu->ctx)
1213 perf_pmu_unregister(&pmu->pmu);
1214 free_percpu(pmu->ctx);
1215 pmu->ctx = NULL;
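Lines 1209-1215 are the unwind loop. A sketch of the whole pass; the uncore->num_pmus bound is an assumed field name, while the NULL-check-then-unregister ordering comes from the listing:

	for (j = 0; j < uncore->num_pmus; j++) {
		pmu = &uncore->pmus[j];
		if (!pmu->ctx)		/* skip PMUs that never finished init */
			continue;

		perf_pmu_unregister(&pmu->pmu);
		free_percpu(pmu->ctx);
		pmu->ctx = NULL;	/* matches the error paths at lines 776/912/1077 */
	}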