Lines matching refs:pmu in drivers/gpu/drm/i915/i915_pmu.c

33 return container_of(event->pmu, struct i915_pmu, base);
36 static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
38 return container_of(pmu, struct drm_i915_private, pmu);
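These two helpers are the standard container_of() walk: perf hands the driver its embedded struct pmu (the `base` member), event_to_pmu() recovers the enclosing struct i915_pmu, and pmu_to_i915() recovers the drm_i915_private that embeds that. A minimal userspace sketch of the same idiom (types and names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Same layout trick as the kernel's container_of(), minus the type checks. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base  { int kind; };
struct outer { int id; struct base base; };

static struct outer *base_to_outer(struct base *b)
{
	return container_of(b, struct outer, base);
}

int main(void)
{
	struct outer o = { .id = 42 };
	printf("%d\n", base_to_outer(&o.base)->id);	/* prints 42 */
	return 0;
}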
149 static bool pmu_needs_timer(struct i915_pmu *pmu)
151 struct drm_i915_private *i915 = pmu_to_i915(pmu);
159 enable = pmu->enable;
201 static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
203 return pmu->sample[gt_id][sample].cur;
207 store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
209 pmu->sample[gt_id][sample].cur = val;
213 add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
215 pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
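The sample accessors above are thin wrappers over a per-GT array of u64 counters; add_sample_mult() relies on the kernel's mul_u32_u32() to widen the 32x32-bit multiply to 64 bits before accumulating. A hedged userspace equivalent (array sizes illustrative):

#include <stdint.h>

#define MAX_GTS     2	/* illustrative sizes */
#define NUM_SAMPLES 4

struct sample { uint64_t cur; };
static struct sample samples[MAX_GTS][NUM_SAMPLES];

static uint64_t read_sample(unsigned int gt, int s)
{
	return samples[gt][s].cur;
}

/* Widen before multiplying, as mul_u32_u32() guarantees in the kernel. */
static void add_sample_mult(unsigned int gt, int s, uint32_t val, uint32_t mul)
{
	samples[gt][s].cur += (uint64_t)val * mul;
}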
222 struct i915_pmu *pmu = &i915->pmu;
233 spin_lock_irqsave(&pmu->lock, flags);
236 store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
245 val = ktime_since_raw(pmu->sleep_last[gt_id]);
246 val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
249 if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
250 val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
252 store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
254 spin_unlock_irqrestore(&pmu->lock, flags);
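Lines 233-254 are the locked RC6 read: with the device awake the driver stores a fresh hardware sample; with it asleep (lines 245-246) it estimates RC6 as the last sample plus time slept, then clamps against __I915_SAMPLE_RC6_LAST_REPORTED so the counter never moves backwards when switching between the two paths. A sketch of that monotonic-clamp idiom, with a pthread mutex standing in for pmu->lock:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_reported;

/* Never let the reported counter go backwards, whichever path produced
 * the estimate. */
static uint64_t report_monotonic(uint64_t estimate)
{
	pthread_mutex_lock(&lock);
	if (estimate < last_reported)
		estimate = last_reported;
	last_reported = estimate;
	pthread_mutex_unlock(&lock);
	return estimate;
}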
259 static void init_rc6(struct i915_pmu *pmu)
261 struct drm_i915_private *i915 = pmu_to_i915(pmu);
271 store_sample(pmu, i, __I915_SAMPLE_RC6, val);
272 store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
274 pmu->sleep_last[i] = ktime_get_raw();
281 struct i915_pmu *pmu = &gt->i915->pmu;
283 store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
284 pmu->sleep_last[gt->info.id] = ktime_get_raw();
287 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
289 if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
290 pmu->timer_enabled = true;
291 pmu->timer_last = ktime_get();
292 hrtimer_start_range_ns(&pmu->timer,
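__i915_pmu_maybe_start_timer() arms the sampling hrtimer at most once, and only when pmu_needs_timer() says some enabled event actually requires periodic sampling; timer_last is stamped at arm time so the callback can measure real elapsed periods. The guard-then-arm shape, sketched with stub helpers standing in for the hrtimer API:

#include <stdbool.h>
#include <time.h>

static bool timer_enabled;
static struct timespec timer_last;

static bool needs_timer(void) { return true; }	/* stand-in: any sampling event live? */
static void arm_timer(void)   { /* stand-in for hrtimer_start_range_ns() */ }

static void maybe_start_timer(void)
{
	/* Arm at most once, and only if somebody actually wants samples. */
	if (!timer_enabled && needs_timer()) {
		timer_enabled = true;
		clock_gettime(CLOCK_MONOTONIC, &timer_last);
		arm_timer();
	}
}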
300 struct i915_pmu *pmu = &gt->i915->pmu;
302 if (!pmu->registered)
305 spin_lock_irq(&pmu->lock);
313 pmu->unparked &= ~BIT(gt->info.id);
314 if (pmu->unparked == 0)
315 pmu->timer_enabled = false;
317 spin_unlock_irq(&pmu->lock);
322 struct i915_pmu *pmu = &gt->i915->pmu;
324 if (!pmu->registered)
327 spin_lock_irq(&pmu->lock);
332 if (pmu->unparked == 0)
333 __i915_pmu_maybe_start_timer(pmu);
335 pmu->unparked |= BIT(gt->info.id);
337 spin_unlock_irq(&pmu->lock);
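Park and unpark maintain a per-GT bitmask under pmu->lock: parking clears the GT's bit and disables the timer once no GT is awake; unparking starts the timer when the first GT wakes (the mask is still zero at that point, line 332) and then sets the bit. The mask bookkeeping, with locking omitted:

#include <stdbool.h>
#include <stdint.h>

static uint32_t unparked;	/* one bit per GT */
static bool timer_enabled;

static void gt_parked(unsigned int gt_id)
{
	unparked &= ~(1u << gt_id);
	if (unparked == 0)		/* last GT asleep: stop sampling */
		timer_enabled = false;
}

static void gt_unparked(unsigned int gt_id)
{
	if (unparked == 0)		/* first GT waking: restart sampling */
		timer_enabled = true;
	unparked |= 1u << gt_id;
}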
358 struct intel_engine_pmu *pmu = &engine->pmu;
367 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
369 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
388 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
393 struct intel_engine_pmu *pmu = &engine->pmu;
401 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
405 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
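Both engine samplers follow the same accumulate-by-period scheme: on each timer tick they inspect engine state and credit the elapsed period_ns to every bucket (busy, wait, sema) that was active, so each counter converges on nanoseconds spent in that state. A sketch, with bucket names mirroring the I915_SAMPLE_* indices:

#include <stdbool.h>
#include <stdint.h>

enum { SAMPLE_BUSY, SAMPLE_WAIT, SAMPLE_SEMA, NUM_ENGINE_SAMPLES };
static uint64_t engine_sample[NUM_ENGINE_SAMPLES];

/* Called once per tick with the state observed on the engine. */
static void sample_engine(bool busy, bool waiting, bool sema, uint64_t period_ns)
{
	if (busy)
		engine_sample[SAMPLE_BUSY] += period_ns;
	if (waiting)
		engine_sample[SAMPLE_WAIT] += period_ns;
	if (sema)
		engine_sample[SAMPLE_SEMA] += period_ns;
}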
424 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
431 if (!engine->pmu.enable)
450 frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
452 return pmu->enable &
462 struct i915_pmu *pmu = &i915->pmu;
466 if (!frequency_sampling_enabled(pmu, gt_id))
474 if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
490 add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
494 if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
495 add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
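Frequency sampling is gated per counter: config_mask() maps an event config to a bit in pmu->enable, and add_sample_mult(..., freq, period_ns) integrates frequency over time. Dividing the accumulated product by total time later yields a time-weighted average, roughly like this sketch:

#include <stdint.h>

static uint64_t freq_sum;	/* accumulates freq_mhz * period_ns */
static uint64_t time_sum;	/* total sampled nanoseconds */

static void sample_freq(uint32_t freq_mhz, uint32_t period_ns)
{
	freq_sum += (uint64_t)freq_mhz * period_ns;
	time_sum += period_ns;
}

static uint64_t avg_mhz(void)
{
	return time_sum ? freq_sum / time_sum : 0;
}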
505 struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
506 struct drm_i915_private *i915 = pmu_to_i915(pmu);
512 if (!READ_ONCE(pmu->timer_enabled))
516 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
517 pmu->timer_last = now;
527 if (!(pmu->unparked & BIT(i)))
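The hrtimer callback measures the period it actually covers (now - pmu->timer_last) instead of trusting the nominal interval, so a late timer expiry does not skew the accounting, and it skips any GT whose bit is clear in pmu->unparked. The elapsed-period idiom in userspace terms:

#include <stdint.h>
#include <time.h>

static struct timespec timer_last;	/* primed when the timer is armed */

/* True nanoseconds since the previous tick, not the nominal interval. */
static uint64_t tick_period_ns(void)
{
	struct timespec now;
	int64_t ns;

	clock_gettime(CLOCK_MONOTONIC, &now);
	ns = (int64_t)(now.tv_sec - timer_last.tv_sec) * 1000000000
	     + (now.tv_nsec - timer_last.tv_nsec);
	timer_last = now;
	return (uint64_t)ns;
}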
541 struct i915_pmu *pmu = event_to_pmu(event);
542 struct drm_i915_private *i915 = pmu_to_i915(pmu);
608 struct i915_pmu *pmu = event_to_pmu(event);
609 struct drm_i915_private *i915 = pmu_to_i915(pmu);
622 struct i915_pmu *pmu = event_to_pmu(event);
623 struct drm_i915_private *i915 = pmu_to_i915(pmu);
626 if (!pmu->registered)
629 if (event->attr.type != event->pmu->type)
659 struct i915_pmu *pmu = event_to_pmu(event);
660 struct drm_i915_private *i915 = pmu_to_i915(pmu);
680 val = engine->pmu.sample[sample].cur;
689 div_u64(read_sample(pmu, gt_id,
695 div_u64(read_sample(pmu, gt_id,
700 val = READ_ONCE(pmu->irq_count);
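The read path is a single dispatch over the event config: engine events return the latest sample[].cur, frequency events scale the accumulated product back down with div_u64(), and irq_count is fetched with READ_ONCE() because the interrupt path updates it without taking pmu->lock. A sketch of the dispatch (counter names and the exact scaling factor are illustrative; a GCC/Clang atomic load stands in for READ_ONCE()):

#include <stdint.h>

enum counter { COUNTER_BUSY_NS, COUNTER_FREQ_AVG, COUNTER_IRQS };

static uint64_t busy_ns, freq_mhz_ns_sum, irq_count;

static uint64_t counter_read(enum counter c)
{
	switch (c) {
	case COUNTER_BUSY_NS:
		return busy_ns;				/* raw accumulated ns */
	case COUNTER_FREQ_AVG:
		return freq_mhz_ns_sum / 1000000;	/* scale down, cf. div_u64() */
	case COUNTER_IRQS:
		/* lock-free read of a concurrently updated counter */
		return __atomic_load_n(&irq_count, __ATOMIC_RELAXED);
	}
	return 0;
}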
716 struct i915_pmu *pmu = event_to_pmu(event);
720 if (!pmu->registered) {
735 struct i915_pmu *pmu = event_to_pmu(event);
736 struct drm_i915_private *i915 = pmu_to_i915(pmu);
743 spin_lock_irqsave(&pmu->lock, flags);
749 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
750 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
751 GEM_BUG_ON(pmu->enable_count[bit] == ~0);
753 pmu->enable |= BIT(bit);
754 pmu->enable_count[bit]++;
759 __i915_pmu_maybe_start_timer(pmu);
773 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
775 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
777 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
778 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
779 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
781 engine->pmu.enable |= BIT(sample);
782 engine->pmu.enable_count[sample]++;
785 spin_unlock_irqrestore(&pmu->lock, flags);
798 struct i915_pmu *pmu = event_to_pmu(event);
799 struct drm_i915_private *i915 = pmu_to_i915(pmu);
806 spin_lock_irqsave(&pmu->lock, flags);
816 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
817 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
818 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
824 if (--engine->pmu.enable_count[sample] == 0)
825 engine->pmu.enable &= ~BIT(sample);
828 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
829 GEM_BUG_ON(pmu->enable_count[bit] == 0);
834 if (--pmu->enable_count[bit] == 0) {
835 pmu->enable &= ~BIT(bit);
836 pmu->timer_enabled &= pmu_needs_timer(pmu);
839 spin_unlock_irqrestore(&pmu->lock, flags);
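Enable and disable are symmetric reference counts over a bitmask: a set bit in pmu->enable means "at least one live event of this type", and enable_count[bit] says how many. The first enable sets the bit (and may start the timer, line 759); the last disable clears it and re-evaluates timer_enabled (line 836). The GEM_BUG_ON checks against ~0 and 0 catch wrap in either direction. The core pattern, with asserts standing in for GEM_BUG_ON():

#include <assert.h>
#include <stdint.h>

#define MASK_BITS 32
static uint32_t enable;				/* bit set while count > 0 */
static uint16_t enable_count[MASK_BITS];

static void event_enable(unsigned int bit)
{
	assert(enable_count[bit] != UINT16_MAX);	/* cf. GEM_BUG_ON(... == ~0) */
	enable |= 1u << bit;
	enable_count[bit]++;
}

static void event_disable(unsigned int bit)
{
	assert(enable_count[bit] != 0);			/* cf. GEM_BUG_ON(... == 0) */
	if (--enable_count[bit] == 0)
		enable &= ~(1u << bit);
}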
844 struct i915_pmu *pmu = event_to_pmu(event);
846 if (!pmu->registered)
855 struct i915_pmu *pmu = event_to_pmu(event);
857 if (!pmu->registered)
871 struct i915_pmu *pmu = event_to_pmu(event);
873 if (!pmu->registered)
979 create_event_attributes(struct i915_pmu *pmu)
981 struct drm_i915_private *i915 = pmu_to_i915(pmu);
1114 pmu->i915_attr = i915_attr;
1115 pmu->pmu_attr = pmu_attr;
1131 static void free_event_attributes(struct i915_pmu *pmu)
1133 struct attribute **attr_iter = pmu->events_attr_group.attrs;
1138 kfree(pmu->events_attr_group.attrs);
1139 kfree(pmu->i915_attr);
1140 kfree(pmu->pmu_attr);
1142 pmu->events_attr_group.attrs = NULL;
1143 pmu->i915_attr = NULL;
1144 pmu->pmu_attr = NULL;
1149 struct i915_pmu *pmu = &i915->pmu;
1152 &pmu->events_attr_group,
1157 spin_lock_init(&pmu->lock);
1158 hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1159 init_rc6(pmu);
1162 pmu->name = kasprintf(GFP_KERNEL,
1165 if (pmu->name) {
1167 strreplace((char *)pmu->name, ':', '_');
1170 pmu->name = "i915";
1172 if (!pmu->name)
1175 pmu->events_attr_group.name = "events";
1176 pmu->events_attr_group.attrs = create_event_attributes(pmu);
1177 if (!pmu->events_attr_group.attrs)
1180 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
1182 if (!pmu->base.attr_groups)
1185 pmu->base.module = THIS_MODULE;
1186 pmu->base.task_ctx_nr = perf_invalid_context;
1187 pmu->base.scope = PERF_PMU_SCOPE_SYS_WIDE;
1188 pmu->base.event_init = i915_pmu_event_init;
1189 pmu->base.add = i915_pmu_event_add;
1190 pmu->base.del = i915_pmu_event_del;
1191 pmu->base.start = i915_pmu_event_start;
1192 pmu->base.stop = i915_pmu_event_stop;
1193 pmu->base.read = i915_pmu_event_read;
1195 ret = perf_pmu_register(&pmu->base, pmu->name, -1);
1199 pmu->registered = true;
1204 kfree(pmu->base.attr_groups);
1206 free_event_attributes(pmu);
1209 kfree(pmu->name);
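Registration wires the perf callbacks into pmu->base, declares the PMU system-wide (perf_invalid_context, PERF_PMU_SCOPE_SYS_WIDE), and sets pmu->registered only after perf_pmu_register() succeeds; the error labels unwind the allocations in reverse order. perf_pmu_register() is kernel-only, so here is just the shape of that init-then-unwind flow (struct and helpers illustrative):

#include <stdlib.h>
#include <string.h>

struct ctx { char *name; void *attrs; int registered; };

static int ctx_init(struct ctx *c)
{
	c->name = strdup("i915");
	if (!c->name)
		goto err;
	c->attrs = malloc(64);
	if (!c->attrs)
		goto err_name;
	/* register with the core here; on failure, free attrs then fall through */
	c->registered = 1;
	return 0;

err_name:
	free(c->name);
err:
	return -1;	/* cf. -ENOMEM */
}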
1216 struct i915_pmu *pmu = &i915->pmu;
1218 if (!pmu->registered)
1222 pmu->registered = false;
1224 hrtimer_cancel(&pmu->timer);
1226 perf_pmu_unregister(&pmu->base);
1227 kfree(pmu->base.attr_groups);
1229 kfree(pmu->name);
1230 free_event_attributes(pmu);
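Teardown reverses that order deliberately: pmu->registered is cleared first so event entry points bail out, the sampling timer is cancelled before the perf core forgets the PMU, and the name and attribute tables are freed only once nothing can reach them. The same ordering, sketched with stub helpers standing in for the kernel calls:

#include <stdlib.h>

struct ctx { char *name; void *attrs; int registered; };

static void stop_timer(struct ctx *c)      { (void)c; /* cf. hrtimer_cancel() */ }
static void core_unregister(struct ctx *c) { (void)c; /* cf. perf_pmu_unregister() */ }

static void ctx_unregister(struct ctx *c)
{
	if (!c->registered)
		return;

	c->registered = 0;	/* 1: new entry points bail out from now on */
	stop_timer(c);		/* 2: quiesce the sampler */
	core_unregister(c);	/* 3: detach from the core */
	free(c->name);		/* 4: free only what nothing can reach anymore */
	free(c->attrs);
}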