1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2025 Intel Corporation
4 */
5
6 #include <drm/drm_managed.h>
7
8 #include "abi/guc_actions_abi.h"
9 #include "regs/xe_gt_regs.h"
10
11 #include "xe_bo.h"
12 #include "xe_force_wake.h"
13 #include "xe_gt_printk.h"
14 #include "xe_guc.h"
15 #include "xe_guc_engine_activity.h"
16 #include "xe_guc_ct.h"
17 #include "xe_hw_engine.h"
18 #include "xe_map.h"
19 #include "xe_mmio.h"
20 #include "xe_sriov_pf_helpers.h"
21 #include "xe_trace_guc.h"
22
/* Denominator for quanta_ratio scaling: a ratio of TOTAL_QUANTA means 100% */
#define TOTAL_QUANTA 0x8000
24
engine_activity_map(struct xe_guc * guc,struct xe_hw_engine * hwe,unsigned int index)25 static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe,
26 unsigned int index)
27 {
28 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
29 struct engine_activity_buffer *buffer;
30 u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
31 size_t offset;
32
33 if (engine_activity->num_functions) {
34 buffer = &engine_activity->function_buffer;
35 offset = sizeof(struct guc_engine_activity_data) * index;
36 } else {
37 buffer = &engine_activity->device_buffer;
38 offset = 0;
39 }
40
41 offset += offsetof(struct guc_engine_activity_data,
42 engine_activity[guc_class][hwe->logical_instance]);
43
44 return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
45 }
46
engine_metadata_map(struct xe_guc * guc,unsigned int index)47 static struct iosys_map engine_metadata_map(struct xe_guc *guc,
48 unsigned int index)
49 {
50 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
51 struct engine_activity_buffer *buffer;
52 size_t offset;
53
54 if (engine_activity->num_functions) {
55 buffer = &engine_activity->function_buffer;
56 offset = sizeof(struct guc_engine_activity_metadata) * index;
57 } else {
58 buffer = &engine_activity->device_buffer;
59 offset = 0;
60 }
61
62 return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset);
63 }
64
allocate_engine_activity_group(struct xe_guc * guc)65 static int allocate_engine_activity_group(struct xe_guc *guc)
66 {
67 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
68 struct xe_device *xe = guc_to_xe(guc);
69 u32 num_activity_group;
70
71 /*
72 * An additional activity group is allocated for PF
73 */
74 num_activity_group = IS_SRIOV_PF(xe) ? xe_sriov_pf_get_totalvfs(xe) + 1 : 1;
75
76 engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
77 sizeof(struct engine_activity_group), GFP_KERNEL);
78
79 if (!engine_activity->eag)
80 return -ENOMEM;
81
82 engine_activity->num_activity_group = num_activity_group;
83
84 return 0;
85 }
86
/*
 * Allocate the pinned, GGTT-mapped buffers GuC writes activity data into:
 * a metadata buffer in system memory and an activity-data buffer placed in
 * VRAM on discrete GPUs. @count is the number of per-function slots
 * (1 for the device-wide buffer).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int allocate_engine_activity_buffers(struct xe_guc *guc,
					    struct engine_activity_buffer *buffer,
					    int count)
{
	u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count;
	u32 size = sizeof(struct guc_engine_activity_data) * count;
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo, *metadata_bo;

	metadata_bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(metadata_size),
						ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
						XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE,
						false);

	if (IS_ERR(metadata_bo))
		return PTR_ERR(metadata_bo);

	bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(size),
				       ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				       XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, false);

	if (IS_ERR(bo)) {
		/* unwind the metadata BO allocated above */
		xe_bo_unpin_map_no_vm(metadata_bo);
		return PTR_ERR(bo);
	}

	buffer->metadata_bo = metadata_bo;
	buffer->activity_bo = bo;
	return 0;
}
118
/* Release the pinned metadata and activity BOs held by @buffer */
static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
{
	xe_bo_unpin_map_no_vm(buffer->metadata_bo);
	xe_bo_unpin_map_no_vm(buffer->activity_bo);
}
124
/*
 * Check whether the loaded GuC firmware supports engine activity stats.
 * Not available on VFs, and requires GuC interface version 1.14.1+.
 */
static bool is_engine_activity_supported(struct xe_guc *guc)
{
	struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
	struct xe_uc_fw_version required = { .major = 1, .minor = 14, .patch = 1 };
	struct xe_gt *gt = guc_to_gt(guc);

	/* VFs cannot collect these stats; the PF owns the GuC buffers */
	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		xe_gt_info(gt, "engine activity stats not supported on VFs\n");
		return false;
	}

	/* engine activity stats is supported from GuC interface version (1.14.1) */
	if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER_STRUCT(required)) {
		xe_gt_info(gt,
			   "engine activity stats unsupported in GuC interface v%u.%u.%u, need v%u.%u.%u or higher\n",
			   version->major, version->minor, version->patch, required.major,
			   required.minor, required.patch);
		return false;
	}

	return true;
}
147
hw_engine_to_engine_activity(struct xe_hw_engine * hwe,unsigned int index)148 static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe,
149 unsigned int index)
150 {
151 struct xe_guc *guc = &hwe->gt->uc.guc;
152 struct engine_activity_group *eag = &guc->engine_activity.eag[index];
153 u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
154
155 return &eag->engine[guc_class][hwe->logical_instance];
156 }
157
/* Convert a CPU time interval in ns to GuC timestamp ticks at @freq Hz */
static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
{
	return mul_u64_u32_div(ns, freq, NSEC_PER_SEC);
}
162
/* Read one field of a GuC-written struct guc_engine_activity via @map_ */
#define read_engine_activity_record(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity, field_)

/* Read one field of a GuC-written struct guc_engine_activity_metadata via @map_ */
#define read_metadata_record(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
168
/*
 * Compute the accumulated active ticks for @hwe in activity group @index.
 *
 * GuC publishes activity records guarded by change counters: a global one
 * in the metadata record and a per-engine one in the activity record. New
 * data is folded into the cached totals only when both counters have moved
 * since the last read; otherwise the cached state is reused. If the engine
 * is currently running, the total is extrapolated with the GPM timestamp
 * read from MISC_STATUS_0.
 */
static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
				   unsigned int index)
{
	struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
	struct guc_engine_activity *cached_activity = &ea->activity;
	struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct iosys_map activity_map, metadata_map;
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 last_update_tick, global_change_num;
	u64 active_ticks, gpm_ts;
	u16 change_num;

	activity_map = engine_activity_map(guc, hwe, index);
	metadata_map = engine_metadata_map(guc, index);
	global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);

	/* GuC has not initialized activity data yet, return 0 */
	if (!global_change_num)
		goto update;

	/* no new GuC update since the last read; keep the cached snapshot */
	if (global_change_num == cached_metadata->global_change_num)
		goto update;

	cached_metadata->global_change_num = global_change_num;
	change_num = read_engine_activity_record(xe, &activity_map, change_num);

	/* this engine's record unchanged (or not yet written); nothing to fold in */
	if (!change_num || change_num == cached_activity->change_num)
		goto update;

	/* read engine activity values */
	last_update_tick = read_engine_activity_record(xe, &activity_map, last_update_tick);
	active_ticks = read_engine_activity_record(xe, &activity_map, active_ticks);

	/* activity calculations */
	ea->running = !!last_update_tick;	/* non-zero tick => engine busy at snapshot */
	ea->total += active_ticks - cached_activity->active_ticks;
	ea->active = 0;

	/* cache the counter */
	cached_activity->change_num = change_num;
	cached_activity->last_update_tick = last_update_tick;
	cached_activity->active_ticks = active_ticks;

update:
	/* extrapolate to "now" using the GPM timestamp while still running */
	if (ea->running) {
		gpm_ts = xe_mmio_read64_2x32(&gt->mmio, MISC_STATUS_0) >>
			 engine_activity->gpm_timestamp_shift;
		ea->active = lower_32_bits(gpm_ts) - cached_activity->last_update_tick;
	}

	trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);

	return ea->total + ea->active;
}
225
/*
 * Compute the total GuC ticks allocated to @hwe's function in activity
 * group @index.
 *
 * The CPU time elapsed since the previous call is scaled by the
 * GuC-reported quanta_ratio (a fraction of TOTAL_QUANTA), accumulated in
 * ns with the division remainder carried forward to avoid drift, then
 * converted to GuC timestamp ticks.
 */
static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index)
{
	struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
	struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
	struct guc_engine_activity *cached_activity = &ea->activity;
	struct iosys_map activity_map, metadata_map;
	struct xe_device *xe = guc_to_xe(guc);
	ktime_t now, cpu_delta;
	u64 numerator;
	u16 quanta_ratio;

	activity_map = engine_activity_map(guc, hwe, index);
	metadata_map = engine_metadata_map(guc, index);

	/* the GuC TSC frequency is constant; read it from GuC only once */
	if (!cached_metadata->guc_tsc_frequency_hz)
		cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
									     guc_tsc_frequency_hz);

	quanta_ratio = read_engine_activity_record(xe, &activity_map, quanta_ratio);
	cached_activity->quanta_ratio = quanta_ratio;

	/* Total ticks calculations */
	now = ktime_get();
	cpu_delta = now - ea->last_cpu_ts;
	ea->last_cpu_ts = now;
	numerator = (ea->quanta_remainder_ns + cpu_delta) * cached_activity->quanta_ratio;
	ea->quanta_ns += numerator / TOTAL_QUANTA;
	ea->quanta_remainder_ns = numerator % TOTAL_QUANTA;	/* carry remainder forward */
	ea->quanta = cpu_ns_to_guc_tsc_tick(ea->quanta_ns, cached_metadata->guc_tsc_frequency_hz);

	trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);

	return ea->quanta;
}
260
enable_engine_activity_stats(struct xe_guc * guc)261 static int enable_engine_activity_stats(struct xe_guc *guc)
262 {
263 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
264 struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
265 u32 action[] = {
266 XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER,
267 xe_bo_ggtt_addr(buffer->metadata_bo),
268 0,
269 xe_bo_ggtt_addr(buffer->activity_bo),
270 0,
271 };
272
273 /* Blocking here to ensure the buffers are ready before reading them */
274 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
275 }
276
/*
 * Enable or disable per-function engine activity stats in GuC. When
 * disabling, the buffer addresses and function count are sent as zero,
 * which tells GuC to stop using the function buffers.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable)
{
	struct engine_activity_buffer *buffer = &guc->engine_activity.function_buffer;
	u32 action[6] = { XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER };

	/* all non-action words stay zero on disable */
	if (enable) {
		action[1] = guc->engine_activity.num_functions;
		action[2] = xe_bo_ggtt_addr(buffer->metadata_bo);
		action[4] = xe_bo_ggtt_addr(buffer->activity_bo);
	}

	/* Blocking here to ensure the buffers are ready before reading them */
	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}
301
engine_activity_set_cpu_ts(struct xe_guc * guc,unsigned int index)302 static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index)
303 {
304 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
305 struct engine_activity_group *eag = &engine_activity->eag[index];
306 int i, j;
307
308 xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group);
309
310 for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
311 for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
312 eag->engine[i][j].last_cpu_ts = ktime_get();
313 }
314
/*
 * Derive the right-shift to apply to the raw GPM timestamp from the CTC
 * shift parameter field of RPM_CONFIG0.
 */
static u32 gpm_timestamp_shift(struct xe_gt *gt)
{
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);

	return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
323
is_function_valid(struct xe_guc * guc,unsigned int fn_id)324 static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id)
325 {
326 struct xe_device *xe = guc_to_xe(guc);
327 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
328
329 if (!IS_SRIOV_PF(xe) && fn_id)
330 return false;
331
332 if (engine_activity->num_functions && fn_id >= engine_activity->num_functions)
333 return false;
334
335 return true;
336 }
337
engine_activity_disable_function_stats(struct xe_guc * guc)338 static int engine_activity_disable_function_stats(struct xe_guc *guc)
339 {
340 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
341 struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
342 int ret;
343
344 if (!engine_activity->num_functions)
345 return 0;
346
347 ret = enable_function_engine_activity_stats(guc, false);
348 if (ret)
349 return ret;
350
351 free_engine_activity_buffers(buffer);
352 engine_activity->num_functions = 0;
353
354 return 0;
355 }
356
engine_activity_enable_function_stats(struct xe_guc * guc,int num_vfs)357 static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs)
358 {
359 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
360 struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
361 int ret, i;
362
363 if (!num_vfs)
364 return 0;
365
366 /* This includes 1 PF and num_vfs */
367 engine_activity->num_functions = num_vfs + 1;
368
369 ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions);
370 if (ret)
371 return ret;
372
373 ret = enable_function_engine_activity_stats(guc, true);
374 if (ret) {
375 free_engine_activity_buffers(buffer);
376 engine_activity->num_functions = 0;
377 return ret;
378 }
379
380 /* skip PF as it was already setup */
381 for (i = 1; i < engine_activity->num_functions; i++)
382 engine_activity_set_cpu_ts(guc, i);
383
384 return 0;
385 }
386
387 /**
388 * xe_guc_engine_activity_active_ticks - Get engine active ticks
389 * @guc: The GuC object
390 * @hwe: The hw_engine object
391 * @fn_id: function id to report on
392 *
393 * Return: accumulated ticks @hwe was active since engine activity stats were enabled.
394 */
xe_guc_engine_activity_active_ticks(struct xe_guc * guc,struct xe_hw_engine * hwe,unsigned int fn_id)395 u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
396 unsigned int fn_id)
397 {
398 if (!xe_guc_engine_activity_supported(guc))
399 return 0;
400
401 if (!is_function_valid(guc, fn_id))
402 return 0;
403
404 return get_engine_active_ticks(guc, hwe, fn_id);
405 }
406
407 /**
408 * xe_guc_engine_activity_total_ticks - Get engine total ticks
409 * @guc: The GuC object
410 * @hwe: The hw_engine object
411 * @fn_id: function id to report on
412 *
413 * Return: accumulated quanta of ticks allocated for the engine
414 */
xe_guc_engine_activity_total_ticks(struct xe_guc * guc,struct xe_hw_engine * hwe,unsigned int fn_id)415 u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
416 unsigned int fn_id)
417 {
418 if (!xe_guc_engine_activity_supported(guc))
419 return 0;
420
421 if (!is_function_valid(guc, fn_id))
422 return 0;
423
424 return get_engine_total_ticks(guc, hwe, fn_id);
425 }
426
427 /**
428 * xe_guc_engine_activity_supported - Check support for engine activity stats
429 * @guc: The GuC object
430 *
431 * Engine activity stats is supported from GuC interface version (1.14.1)
432 *
433 * Return: true if engine activity stats supported, false otherwise
434 */
xe_guc_engine_activity_supported(struct xe_guc * guc)435 bool xe_guc_engine_activity_supported(struct xe_guc *guc)
436 {
437 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
438
439 return engine_activity->supported;
440 }
441
442 /**
443 * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats
444 * @guc: The GuC object
445 * @num_vfs: number of vfs
446 * @enable: true to enable, false otherwise
447 *
448 * Return: 0 on success, negative error code otherwise
449 */
xe_guc_engine_activity_function_stats(struct xe_guc * guc,int num_vfs,bool enable)450 int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable)
451 {
452 if (!xe_guc_engine_activity_supported(guc))
453 return 0;
454
455 if (enable)
456 return engine_activity_enable_function_stats(guc, num_vfs);
457
458 return engine_activity_disable_function_stats(guc);
459 }
460
461 /**
462 * xe_guc_engine_activity_enable_stats - Enable engine activity stats
463 * @guc: The GuC object
464 *
465 * Enable engine activity stats and set initial timestamps
466 */
xe_guc_engine_activity_enable_stats(struct xe_guc * guc)467 void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
468 {
469 int ret;
470
471 if (!xe_guc_engine_activity_supported(guc))
472 return;
473
474 ret = enable_engine_activity_stats(guc);
475 if (ret)
476 xe_gt_err(guc_to_gt(guc), "failed to enable activity stats%d\n", ret);
477 else
478 engine_activity_set_cpu_ts(guc, 0);
479 }
480
/* devm teardown action: release the device-wide activity buffers */
static void engine_activity_fini(void *arg)
{
	struct xe_guc_engine_activity *engine_activity = arg;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;

	free_engine_activity_buffers(buffer);
}
488
489 /**
490 * xe_guc_engine_activity_init - Initialize the engine activity data
491 * @guc: The GuC object
492 *
493 * Return: 0 on success, negative error code otherwise.
494 */
xe_guc_engine_activity_init(struct xe_guc * guc)495 int xe_guc_engine_activity_init(struct xe_guc *guc)
496 {
497 struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
498 struct xe_gt *gt = guc_to_gt(guc);
499 int ret;
500
501 engine_activity->supported = is_engine_activity_supported(guc);
502 if (!engine_activity->supported)
503 return 0;
504
505 ret = allocate_engine_activity_group(guc);
506 if (ret) {
507 xe_gt_err(gt, "failed to allocate engine activity group (%pe)\n", ERR_PTR(ret));
508 return ret;
509 }
510
511 ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1);
512 if (ret) {
513 xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
514 return ret;
515 }
516
517 engine_activity->gpm_timestamp_shift = gpm_timestamp_shift(gt);
518
519 return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, engine_activity_fini,
520 engine_activity);
521 }
522