Lines matching full:pmu, from the i915 perf PMU implementation (drivers/gpu/drm/i915/i915_pmu.c in the Linux kernel)
82 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) in pmu_needs_timer() argument
84 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in pmu_needs_timer()
92 enable = pmu->enable; in pmu_needs_timer()
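The predicate above decides whether the sampling hrtimer has to run at all. A hedged, simplified sketch of its likely body, built only from masks that appear elsewhere in this listing (the real function has at least one more refinement for hardware busy-stats):

    static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
    {
            u64 enable = pmu->enable;

            /* Only software-sampled events need the timer at all. */
            enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
                      config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
                      ENGINE_SAMPLE_MASK;

            /* Engine busyness cannot change while the GPU is parked. */
            if (!gpu_active)
                    enable &= ~ENGINE_SAMPLE_MASK;

            return enable;
    }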
150 struct i915_pmu *pmu = &i915->pmu; in get_rc6() local
161 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
164 pmu->sample[__I915_SAMPLE_RC6].cur = val; in get_rc6()
173 val = ktime_since(pmu->sleep_last); in get_rc6()
174 val += pmu->sample[__I915_SAMPLE_RC6].cur; in get_rc6()
177 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) in get_rc6()
178 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; in get_rc6()
180 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; in get_rc6()
182 spin_unlock_irqrestore(&pmu->lock, flags); in get_rc6()
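The comparison against __I915_SAMPLE_RC6_LAST_REPORTED is what keeps the RC6 counter monotonic: while the GPU is parked, residency is only estimated (time since park plus the last hardware sample), so a later real reading could come out smaller. The same logic, condensed into a hedged helper:

    /* Simplified reconstruction of the clamp in get_rc6(): never let
     * the reported value go backwards, and remember what was reported
     * so the next estimate is clamped against it. */
    static u64 clamp_to_last_reported(struct i915_pmu *pmu, u64 val)
    {
            unsigned long flags;

            spin_lock_irqsave(&pmu->lock, flags);

            if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
                    val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
            pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

            spin_unlock_irqrestore(&pmu->lock, flags);
            return val;
    }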
189 struct i915_pmu *pmu = &i915->pmu; in park_rc6() local
191 if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) in park_rc6()
192 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in park_rc6()
194 pmu->sleep_last = ktime_get(); in park_rc6()
208 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) in __i915_pmu_maybe_start_timer() argument
210 if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { in __i915_pmu_maybe_start_timer()
211 pmu->timer_enabled = true; in __i915_pmu_maybe_start_timer()
212 pmu->timer_last = ktime_get(); in __i915_pmu_maybe_start_timer()
213 hrtimer_start_range_ns(&pmu->timer, in __i915_pmu_maybe_start_timer()
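The timer is started lazily, only when the first sampled event appears while the GPU is active. A minimal sketch of the start path, assuming a roughly 200 Hz sampling interval (the driver derives it from a PERIOD macro) and a pinned relative hrtimer:

    #define SAMPLE_PERIOD_NS (NSEC_PER_SEC / 200)   /* assumed, see PERIOD */

    static void maybe_start_timer(struct i915_pmu *pmu)
    {
            /* Caller holds pmu->lock; timer_last anchors the first
             * period_ns computation in the timer callback. */
            if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
                    pmu->timer_enabled = true;
                    pmu->timer_last = ktime_get();
                    hrtimer_start_range_ns(&pmu->timer,
                                           ns_to_ktime(SAMPLE_PERIOD_NS), 0,
                                           HRTIMER_MODE_REL_PINNED);
            }
    }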
221 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_parked() local
223 if (!pmu->base.event_init) in i915_pmu_gt_parked()
226 spin_lock_irq(&pmu->lock); in i915_pmu_gt_parked()
234 pmu->timer_enabled = pmu_needs_timer(pmu, false); in i915_pmu_gt_parked()
236 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_parked()
241 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_gt_unparked() local
243 if (!pmu->base.event_init) in i915_pmu_gt_unparked()
246 spin_lock_irq(&pmu->lock); in i915_pmu_gt_unparked()
251 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_gt_unparked()
253 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_unparked()
274 struct intel_engine_pmu *pmu = &engine->pmu; in engine_sample() local
283 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); in engine_sample()
285 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); in engine_sample()
304 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); in engine_sample()
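The accumulation helpers behind these calls are one-liners; a plausible reconstruction (mul_u32_u32() from <linux/math64.h> avoids a 32-bit overflow in the frequency product):

    static void add_sample(struct i915_pmu_sample *sample, u32 val)
    {
            sample->cur += val;
    }

    static void add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
    {
            sample->cur += mul_u32_u32(val, mul);
    }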
315 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) in engines_sample()
343 static bool frequency_sampling_enabled(struct i915_pmu *pmu) in frequency_sampling_enabled() argument
345 return pmu->enable & in frequency_sampling_enabled()
355 struct i915_pmu *pmu = &i915->pmu; in frequency_sample() local
358 if (!frequency_sampling_enabled(pmu)) in frequency_sample()
365 if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { in frequency_sample()
383 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], in frequency_sample()
387 if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { in frequency_sample()
388 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], in frequency_sample()
399 container_of(hrtimer, struct drm_i915_private, pmu.timer); in i915_sample()
400 struct i915_pmu *pmu = &i915->pmu; in i915_sample() local
405 if (!READ_ONCE(pmu->timer_enabled)) in i915_sample()
409 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); in i915_sample()
410 pmu->timer_last = now; in i915_sample()
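Filled in around the visible lines, the callback skeleton measures the period that actually elapsed (an hrtimer can fire late), samples with that value, then rearms itself; the sampler bodies are elided here:

    static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
    {
            struct drm_i915_private *i915 =
                    container_of(hrtimer, struct drm_i915_private, pmu.timer);
            struct i915_pmu *pmu = &i915->pmu;
            unsigned int period_ns;
            ktime_t now;

            if (!READ_ONCE(pmu->timer_enabled))
                    return HRTIMER_NORESTART;       /* stopped by disable/park */

            now = ktime_get();
            period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
            pmu->timer_last = now;

            /* engines_sample(..., period_ns); frequency_sample(..., period_ns); */

            hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD /* nominal interval */));
            return HRTIMER_RESTART;
    }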
445 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_destroy()
498 container_of(event->pmu, typeof(*i915), pmu.base); in engine_event_init()
512 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_init()
515 if (event->attr.type != event->pmu->type) in i915_pmu_event_init()
548 container_of(event->pmu, typeof(*i915), pmu.base); in __i915_pmu_event_read()
549 struct i915_pmu *pmu = &i915->pmu; in __i915_pmu_event_read() local
569 val = engine->pmu.sample[sample].cur; in __i915_pmu_event_read()
575 div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, in __i915_pmu_event_read()
580 div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, in __i915_pmu_event_read()
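A hedged reconstruction of the two frequency arms around these hits, assuming the sampler accumulated MHz-times-microsecond products: dividing by USEC_PER_SEC makes perf's delta(count)/delta(time) come out as an average frequency in MHz.

    case I915_PMU_ACTUAL_FREQUENCY:
            val = div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
                          USEC_PER_SEC /* to MHz */);
            break;
    case I915_PMU_REQUESTED_FREQUENCY:
            val = div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
                          USEC_PER_SEC /* to MHz */);
            break;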
613 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_enable()
615 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_enable() local
620 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_enable()
626 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); in i915_pmu_enable()
627 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_enable()
628 GEM_BUG_ON(pmu->enable_count[bit] == ~0); in i915_pmu_enable()
630 if (pmu->enable_count[bit] == 0 && in i915_pmu_enable()
632 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0; in i915_pmu_enable()
633 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); in i915_pmu_enable()
634 pmu->sleep_last = ktime_get(); in i915_pmu_enable()
637 pmu->enable |= BIT_ULL(bit); in i915_pmu_enable()
638 pmu->enable_count[bit]++; in i915_pmu_enable()
643 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_enable()
657 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != in i915_pmu_enable()
659 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != in i915_pmu_enable()
661 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_enable()
662 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_enable()
663 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); in i915_pmu_enable()
665 engine->pmu.enable |= BIT(sample); in i915_pmu_enable()
666 engine->pmu.enable_count[sample]++; in i915_pmu_enable()
669 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_enable()
684 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_disable()
686 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_disable() local
689 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_disable()
699 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_disable()
700 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_disable()
701 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); in i915_pmu_disable()
707 if (--engine->pmu.enable_count[sample] == 0) in i915_pmu_disable()
708 engine->pmu.enable &= ~BIT(sample); in i915_pmu_disable()
711 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_disable()
712 GEM_BUG_ON(pmu->enable_count[bit] == 0); in i915_pmu_disable()
717 if (--pmu->enable_count[bit] == 0) { in i915_pmu_disable()
718 pmu->enable &= ~BIT_ULL(bit); in i915_pmu_disable()
719 pmu->timer_enabled &= pmu_needs_timer(pmu, true); in i915_pmu_disable()
722 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_disable()
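Together, i915_pmu_enable() and i915_pmu_disable() implement a reference-counted enable bitmask; pmu_event_get()/pmu_event_put() below are hypothetical names condensing that skeleton. The irqsave lock variants are needed because the sampling hrtimer takes the same lock; the engine-local enable/enable_count pairs follow the identical scheme.

    static void pmu_event_get(struct i915_pmu *pmu, unsigned int bit)
    {
            unsigned long flags;

            spin_lock_irqsave(&pmu->lock, flags);
            pmu->enable |= BIT_ULL(bit);            /* idempotent for later users */
            pmu->enable_count[bit]++;
            __i915_pmu_maybe_start_timer(pmu);      /* first user may need sampling */
            spin_unlock_irqrestore(&pmu->lock, flags);
    }

    static void pmu_event_put(struct i915_pmu *pmu, unsigned int bit)
    {
            unsigned long flags;

            spin_lock_irqsave(&pmu->lock, flags);
            if (--pmu->enable_count[bit] == 0) {
                    pmu->enable &= ~BIT_ULL(bit);
                    /* may demote, never promote, the running timer */
                    pmu->timer_enabled &= pmu_needs_timer(pmu, true);
            }
            spin_unlock_irqrestore(&pmu->lock, flags);
    }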
859 create_event_attributes(struct i915_pmu *pmu) in create_event_attributes() argument
861 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in create_event_attributes()
974 pmu->i915_attr = i915_attr; in create_event_attributes()
975 pmu->pmu_attr = pmu_attr; in create_event_attributes()
991 static void free_event_attributes(struct i915_pmu *pmu) in free_event_attributes() argument
993 struct attribute **attr_iter = pmu->events_attr_group.attrs; in free_event_attributes()
998 kfree(pmu->events_attr_group.attrs); in free_event_attributes()
999 kfree(pmu->i915_attr); in free_event_attributes()
1000 kfree(pmu->pmu_attr); in free_event_attributes()
1002 pmu->events_attr_group.attrs = NULL; in free_event_attributes()
1003 pmu->i915_attr = NULL; in free_event_attributes()
1004 pmu->pmu_attr = NULL; in free_event_attributes()
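The free path walks the NULL-terminated attrs array before dropping the backing storage, and NULLs the pointers so the error paths of registration can call it unconditionally. A reconstruction of the elided loop, assuming the attribute name strings were allocated per entry:

    static void free_event_attributes(struct i915_pmu *pmu)
    {
            struct attribute **attr_iter = pmu->events_attr_group.attrs;

            for (; *attr_iter; attr_iter++)
                    kfree((*attr_iter)->name);      /* names allocated per entry */

            kfree(pmu->events_attr_group.attrs);
            kfree(pmu->i915_attr);
            kfree(pmu->pmu_attr);

            /* Safe to call again from a failed i915_pmu_register(). */
            pmu->events_attr_group.attrs = NULL;
            pmu->i915_attr = NULL;
            pmu->pmu_attr = NULL;
    }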
1009 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_online() local
1011 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_online()
1022 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_offline() local
1025 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_offline()
1032 perf_pmu_migrate_context(&pmu->base, cpu, target); in i915_pmu_cpu_offline()
1039 static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_register_cpuhp_state() argument
1052 ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node); in i915_pmu_register_cpuhp_state()
1058 pmu->cpuhp.slot = slot; in i915_pmu_register_cpuhp_state()
1062 static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_unregister_cpuhp_state() argument
1064 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in i915_pmu_unregister_cpuhp_state()
1066 drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID); in i915_pmu_unregister_cpuhp_state()
1067 drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node)); in i915_pmu_unregister_cpuhp_state()
1068 cpuhp_remove_multi_state(pmu->cpuhp.slot); in i915_pmu_unregister_cpuhp_state()
1069 pmu->cpuhp.slot = CPUHP_INVALID; in i915_pmu_unregister_cpuhp_state()
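Registration is the mirror image of the teardown above: set up a dynamic multi-state hotplug slot once, then add this PMU as an instance so i915_pmu_cpu_online()/i915_pmu_cpu_offline() run per CPU. A hedged reconstruction (the state name string is an assumption):

    static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
    {
            enum cpuhp_state slot;
            int ret;

            /* Dynamic slot; the callbacks receive a per-instance node. */
            ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                          "perf/x86/intel/i915:online",
                                          i915_pmu_cpu_online,
                                          i915_pmu_cpu_offline);
            if (ret < 0)
                    return ret;

            slot = ret;
            ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
            if (ret) {
                    cpuhp_remove_multi_state(slot);
                    return ret;
            }

            pmu->cpuhp.slot = slot;
            return 0;
    }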
1085 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_register() local
1088 &pmu->events_attr_group, in i915_pmu_register()
1096 drm_info(&i915->drm, "PMU not supported for this GPU."); in i915_pmu_register()
1100 spin_lock_init(&pmu->lock); in i915_pmu_register()
1101 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in i915_pmu_register()
1102 pmu->timer.function = i915_sample; in i915_pmu_register()
1103 pmu->cpuhp.slot = CPUHP_INVALID; in i915_pmu_register()
1106 pmu->name = kasprintf(GFP_KERNEL, in i915_pmu_register()
1109 if (pmu->name) { in i915_pmu_register()
1111 strreplace((char *)pmu->name, ':', '_'); in i915_pmu_register()
1114 pmu->name = "i915"; in i915_pmu_register()
1116 if (!pmu->name) in i915_pmu_register()
1119 pmu->events_attr_group.name = "events"; in i915_pmu_register()
1120 pmu->events_attr_group.attrs = create_event_attributes(pmu); in i915_pmu_register()
1121 if (!pmu->events_attr_group.attrs) in i915_pmu_register()
1124 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), in i915_pmu_register()
1126 if (!pmu->base.attr_groups) in i915_pmu_register()
1129 pmu->base.module = THIS_MODULE; in i915_pmu_register()
1130 pmu->base.task_ctx_nr = perf_invalid_context; in i915_pmu_register()
1131 pmu->base.event_init = i915_pmu_event_init; in i915_pmu_register()
1132 pmu->base.add = i915_pmu_event_add; in i915_pmu_register()
1133 pmu->base.del = i915_pmu_event_del; in i915_pmu_register()
1134 pmu->base.start = i915_pmu_event_start; in i915_pmu_register()
1135 pmu->base.stop = i915_pmu_event_stop; in i915_pmu_register()
1136 pmu->base.read = i915_pmu_event_read; in i915_pmu_register()
1137 pmu->base.event_idx = i915_pmu_event_event_idx; in i915_pmu_register()
1139 ret = perf_pmu_register(&pmu->base, pmu->name, -1); in i915_pmu_register()
1143 ret = i915_pmu_register_cpuhp_state(pmu); in i915_pmu_register()
1150 perf_pmu_unregister(&pmu->base); in i915_pmu_register()
1152 kfree(pmu->base.attr_groups); in i915_pmu_register()
1154 pmu->base.event_init = NULL; in i915_pmu_register()
1155 free_event_attributes(pmu); in i915_pmu_register()
1158 kfree(pmu->name); in i915_pmu_register()
1160 drm_notice(&i915->drm, "Failed to register PMU!\n"); in i915_pmu_register()
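Once perf_pmu_register() succeeds, the PMU shows up under /sys/bus/event_source/devices/<name>. A self-contained userspace sketch of opening one counter, assuming the integrated-GPU name "i915" (the kasprintf() above gives discrete GPUs a per-device name) and the I915_PMU_ACTUAL_FREQUENCY config from the uapi header; error handling is trimmed:

    #include <drm/i915_drm.h>        /* I915_PMU_ACTUAL_FREQUENCY */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr = { .size = sizeof(attr) };
            uint64_t count;
            FILE *f;
            int fd;

            /* The dynamic type id matched by i915_pmu_event_init(). */
            f = fopen("/sys/bus/event_source/devices/i915/type", "r");
            if (!f || fscanf(f, "%u", &attr.type) != 1)
                    return 1;
            fclose(f);

            attr.config = I915_PMU_ACTUAL_FREQUENCY;

            /* i915 events are system-wide: pid == -1 plus a valid cpu. */
            fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0)
                    return 1;

            sleep(1);       /* let the counter accumulate */
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("freq sample: %" PRIu64 "\n", count);
            close(fd);
            return 0;
    }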
1165 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_unregister() local
1167 if (!pmu->base.event_init) in i915_pmu_unregister()
1170 drm_WARN_ON(&i915->drm, pmu->enable); in i915_pmu_unregister()
1172 hrtimer_cancel(&pmu->timer); in i915_pmu_unregister()
1174 i915_pmu_unregister_cpuhp_state(pmu); in i915_pmu_unregister()
1176 perf_pmu_unregister(&pmu->base); in i915_pmu_unregister()
1177 pmu->base.event_init = NULL; in i915_pmu_unregister()
1178 kfree(pmu->base.attr_groups); in i915_pmu_unregister()
1180 kfree(pmu->name); in i915_pmu_unregister()
1181 free_event_attributes(pmu); in i915_pmu_unregister()