// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "regs/xe_gt_regs.h"

#include "xe_bo.h"
#include "xe_force_wake.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_trace_guc.h"

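/*
 * quanta_ratio values read back from the activity buffer are expressed as a
 * fraction of TOTAL_QUANTA; get_engine_total_ticks() scales the elapsed CPU
 * time by quanta_ratio / TOTAL_QUANTA when accumulating the engine's quanta.
 */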
#define TOTAL_QUANTA 0x8000

static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
	u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
	size_t offset;

	offset = offsetof(struct guc_engine_activity_data,
			  engine_activity[guc_class][hwe->logical_instance]);

	return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
}

static struct iosys_map engine_metadata_map(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;

	return buffer->metadata_bo->vmap;
}

static int allocate_engine_activity_group(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct xe_device *xe = guc_to_xe(guc);
	u32 num_activity_group = 1; /* Will be modified for VF */

	engine_activity->eag  = drmm_kcalloc(&xe->drm, num_activity_group,
					     sizeof(struct engine_activity_group), GFP_KERNEL);

	if (!engine_activity->eag)
		return -ENOMEM;

	engine_activity->num_activity_group = num_activity_group;

	return 0;
}

static int allocate_engine_activity_buffers(struct xe_guc *guc,
					    struct engine_activity_buffer *buffer)
{
	u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
	u32 size = sizeof(struct guc_engine_activity_data);
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo, *metadata_bo;

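	/*
	 * The metadata buffer is kept in system memory, while the per-engine
	 * activity buffer is placed in VRAM on discrete GPUs; both are mapped
	 * through the GGTT so the GuC can update them.
	 */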
	metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
					   ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
					   XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);

	if (IS_ERR(metadata_bo))
		return PTR_ERR(metadata_bo);

	bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
				  ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);

	if (IS_ERR(bo)) {
		xe_bo_unpin_map_no_vm(metadata_bo);
		return PTR_ERR(bo);
	}

	buffer->metadata_bo = metadata_bo;
	buffer->activity_bo = bo;
	return 0;
}

static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
{
	xe_bo_unpin_map_no_vm(buffer->metadata_bo);
	xe_bo_unpin_map_no_vm(buffer->activity_bo);
}

static bool is_engine_activity_supported(struct xe_guc *guc)
{
	struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
	struct xe_uc_fw_version required = { 1, 14, 1 };
	struct xe_gt *gt = guc_to_gt(guc);

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		xe_gt_info(gt, "engine activity stats not supported on VFs\n");
		return false;
	}

	/* engine activity stats are supported from GuC interface version 1.14.1 onwards */
	if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER_STRUCT(required)) {
		xe_gt_info(gt,
			   "engine activity stats unsupported in GuC interface v%u.%u.%u, need v%u.%u.%u or higher\n",
			   version->major, version->minor, version->patch, required.major,
			   required.minor, required.patch);
		return false;
	}

	return true;
}

static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
{
	struct xe_guc *guc = &hwe->gt->uc.guc;
	struct engine_activity_group *eag = &guc->engine_activity.eag[0];
	u16 guc_class = xe_engine_class_to_guc_class(hwe->class);

	return &eag->engine[guc_class][hwe->logical_instance];
}

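/* Convert a CPU time delta in ns to GuC timestamp ticks: ns * freq / NSEC_PER_SEC */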
static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
{
	return mul_u64_u32_div(ns, freq, NSEC_PER_SEC);
}

#define read_engine_activity_record(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity, field_)

#define read_metadata_record(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)

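/*
 * Compute the busy ticks accumulated by @hwe. change_num and
 * global_change_num act as sequence counters: the cached totals are only
 * updated when the GuC has published new activity data. If the engine is
 * still running (last_update_tick != 0), the time elapsed since the last
 * GuC update is estimated from the current GPM timestamp.
 */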
static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
	struct guc_engine_activity *cached_activity = &ea->activity;
	struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct iosys_map activity_map, metadata_map;
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 last_update_tick, global_change_num;
	u64 active_ticks, gpm_ts;
	u16 change_num;

	activity_map = engine_activity_map(guc, hwe);
	metadata_map = engine_metadata_map(guc);
	global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);

	/* GuC has not initialized activity data yet, return 0 */
	if (!global_change_num)
		goto update;

	if (global_change_num == cached_metadata->global_change_num)
		goto update;

	cached_metadata->global_change_num = global_change_num;
	change_num = read_engine_activity_record(xe, &activity_map, change_num);

	if (!change_num || change_num == cached_activity->change_num)
		goto update;

	/* read engine activity values */
	last_update_tick = read_engine_activity_record(xe, &activity_map, last_update_tick);
	active_ticks = read_engine_activity_record(xe, &activity_map, active_ticks);

	/* activity calculations */
	ea->running = !!last_update_tick;
	ea->total += active_ticks - cached_activity->active_ticks;
	ea->active = 0;

	/* cache the counter */
	cached_activity->change_num = change_num;
	cached_activity->last_update_tick = last_update_tick;
	cached_activity->active_ticks = active_ticks;

update:
	if (ea->running) {
		gpm_ts = xe_mmio_read64_2x32(&gt->mmio, MISC_STATUS_0) >>
			 engine_activity->gpm_timestamp_shift;
		ea->active = lower_32_bits(gpm_ts) - cached_activity->last_update_tick;
	}

	trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);

	return ea->total + ea->active;
}

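/*
 * Compute the accumulated quanta of ticks allocated to @hwe. The CPU time
 * elapsed since the last query is scaled by quanta_ratio / TOTAL_QUANTA,
 * the remainder is carried over to the next query, and the accumulated ns
 * value is converted to GuC timestamp ticks.
 */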
static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
	struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
	struct guc_engine_activity *cached_activity = &ea->activity;
	struct iosys_map activity_map, metadata_map;
	struct xe_device *xe = guc_to_xe(guc);
	ktime_t now, cpu_delta;
	u64 numerator;
	u16 quanta_ratio;

	activity_map = engine_activity_map(guc, hwe);
	metadata_map = engine_metadata_map(guc);

	if (!cached_metadata->guc_tsc_frequency_hz)
		cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
									     guc_tsc_frequency_hz);

	quanta_ratio = read_engine_activity_record(xe, &activity_map, quanta_ratio);
	cached_activity->quanta_ratio = quanta_ratio;

	/* Total ticks calculations */
	now = ktime_get();
	cpu_delta = now - ea->last_cpu_ts;
	ea->last_cpu_ts = now;
	numerator = (ea->quanta_remainder_ns + cpu_delta) * cached_activity->quanta_ratio;
	ea->quanta_ns += numerator / TOTAL_QUANTA;
	ea->quanta_remainder_ns = numerator % TOTAL_QUANTA;
	ea->quanta = cpu_ns_to_guc_tsc_tick(ea->quanta_ns, cached_metadata->guc_tsc_frequency_hz);

	trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);

	return ea->quanta;
}

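/* Register the metadata and activity buffer GGTT addresses with the GuC */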
static int enable_engine_activity_stats(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
	u32 action[] = {
		XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER,
		xe_bo_ggtt_addr(buffer->metadata_bo),
		0,
		xe_bo_ggtt_addr(buffer->activity_bo),
		0,
	};

	/* Blocking here to ensure the buffers are ready before reading them */
	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

static void engine_activity_set_cpu_ts(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct engine_activity_group *eag = &engine_activity->eag[0];
	int i, j;

	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
			eag->engine[i][j].last_cpu_ts = ktime_get();
}

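/*
 * Number of bits to right-shift the raw GPM timestamp read from
 * MISC_STATUS_0 before comparing it against the GuC-reported
 * last_update_tick in get_engine_active_ticks().
 */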
static u32 gpm_timestamp_shift(struct xe_gt *gt)
{
	u32 reg;

	reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);

	return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}

/**
 * xe_guc_engine_activity_active_ticks - Get engine active ticks
 * @guc: The GuC object
 * @hwe: The hw_engine object
 *
 * Return: accumulated ticks @hwe was active since engine activity stats were enabled.
 */
u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	if (!xe_guc_engine_activity_supported(guc))
		return 0;

	return get_engine_active_ticks(guc, hwe);
}

/**
 * xe_guc_engine_activity_total_ticks - Get engine total ticks
 * @guc: The GuC object
 * @hwe: The hw_engine object
 *
 * Return: accumulated quanta of ticks allocated for the engine
 */
u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
{
	if (!xe_guc_engine_activity_supported(guc))
		return 0;

	return get_engine_total_ticks(guc, hwe);
}

/**
 * xe_guc_engine_activity_supported - Check support for engine activity stats
 * @guc: The GuC object
 *
 * Engine activity stats are supported from GuC interface version 1.14.1 onwards.
 *
 * Return: true if engine activity stats supported, false otherwise
 */
bool xe_guc_engine_activity_supported(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;

	return engine_activity->supported;
}

/**
 * xe_guc_engine_activity_enable_stats - Enable engine activity stats
 * @guc: The GuC object
 *
 * Enable engine activity stats and set initial timestamps
 */
void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
{
	int ret;

	if (!xe_guc_engine_activity_supported(guc))
		return;

	ret = enable_engine_activity_stats(guc);
	if (ret)
		xe_gt_err(guc_to_gt(guc), "failed to enable activity stats %d\n", ret);
	else
		engine_activity_set_cpu_ts(guc);
}

static void engine_activity_fini(void *arg)
{
	struct xe_guc_engine_activity *engine_activity = arg;
	struct engine_activity_buffer *buffer = &engine_activity->device_buffer;

	free_engine_activity_buffers(buffer);
}

/**
 * xe_guc_engine_activity_init - Initialize the engine activity data
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_guc_engine_activity_init(struct xe_guc *guc)
{
	struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	engine_activity->supported = is_engine_activity_supported(guc);
	if (!engine_activity->supported)
		return 0;

	ret = allocate_engine_activity_group(guc);
	if (ret) {
		xe_gt_err(gt, "failed to allocate engine activity group (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
	if (ret) {
		xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
		return ret;
	}

	engine_activity->gpm_timestamp_shift = gpm_timestamp_shift(gt);

	return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, engine_activity_fini,
					engine_activity);
}