xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c (revision e78f70bad29c5ae1e1076698b690b15794e9b81e)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <drm/drm_managed.h>
7 
8 #include "abi/guc_actions_sriov_abi.h"
9 #include "abi/guc_relay_actions_abi.h"
10 
11 #include "regs/xe_gt_regs.h"
12 #include "regs/xe_guc_regs.h"
13 #include "regs/xe_regs.h"
14 
15 #include "xe_mmio.h"
16 #include "xe_gt_sriov_printk.h"
17 #include "xe_gt_sriov_pf_helpers.h"
18 #include "xe_gt_sriov_pf_service.h"
19 #include "xe_gt_sriov_pf_service_types.h"
20 #include "xe_guc_ct.h"
21 #include "xe_guc_hxg_helpers.h"
22 
23 static void pf_init_versions(struct xe_gt *gt)
24 {
25 	BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
26 	BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);
27 
28 	/* base versions may differ between platforms */
29 	gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
30 	gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;
31 
32 	/* latest version is same for all platforms */
33 	gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
34 	gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
35 }
36 
/*
 * Negotiate the VF/PF ABI version to use with a VF.
 *
 * The PF supports versions in the range [base, latest]; the VF either asks
 * for a specific (wanted_major, wanted_minor) or sends the ANY/ANY wildcard
 * to accept whatever the PF offers.  On success the negotiated version is
 * stored in *major/*minor; on failure they are left untouched.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int pf_negotiate_version(struct xe_gt *gt,
				u32 wanted_major, u32 wanted_minor,
				u32 *major, u32 *minor)
{
	struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
	struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;

	/* sanity: base must be a valid version and must not exceed latest */
	xe_gt_assert(gt, base.major);
	xe_gt_assert(gt, base.major <= latest.major);
	xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));

	/* VF doesn't care - return our latest  */
	if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
	    wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
		*major = latest.major;
		*minor = latest.minor;
		return 0;
	}

	/* VF wants newer than our - return our latest  */
	if (wanted_major > latest.major) {
		*major = latest.major;
		*minor = latest.minor;
		return 0;
	}

	/* VF wants older than min required - reject */
	if (wanted_major < base.major ||
	    (wanted_major == base.major && wanted_minor < base.minor)) {
		return -EPERM;
	}

	/*
	 * VF wants an older major within our range - multiple majors are not
	 * supported yet (base.major == latest.major must hold), so reject.
	 */
	if (wanted_major < latest.major) {
		/* XXX: we are not prepared for multi-versions yet */
		xe_gt_assert(gt, base.major == latest.major);
		return -ENOPKG;
	}

	/* same major - return common minor */
	*major = wanted_major;
	*minor = min_t(u32, latest.minor, wanted_minor);
	return 0;
}
82 
83 static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
84 {
85 	xe_gt_sriov_pf_assert_vfid(gt, vfid);
86 	xe_gt_assert(gt, major || minor);
87 
88 	gt->sriov.pf.vfs[vfid].version.major = major;
89 	gt->sriov.pf.vfs[vfid].version.minor = minor;
90 }
91 
92 static void pf_disconnect(struct xe_gt *gt, u32 vfid)
93 {
94 	xe_gt_sriov_pf_assert_vfid(gt, vfid);
95 
96 	gt->sriov.pf.vfs[vfid].version.major = 0;
97 	gt->sriov.pf.vfs[vfid].version.minor = 0;
98 }
99 
100 static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
101 {
102 	xe_gt_sriov_pf_assert_vfid(gt, vfid);
103 
104 	return major == gt->sriov.pf.vfs[vfid].version.major &&
105 	       minor <= gt->sriov.pf.vfs[vfid].version.minor;
106 }
107 
/*
 * Per-platform lists of "runtime" registers (mostly fuse/capability
 * registers) whose values the PF snapshots and later shares with VFs
 * via the VF2PF_QUERY_RUNTIME relay action.  The inline comments give
 * the raw MMIO offsets; entries are kept in ascending offset order.
 */
static const struct xe_reg tgl_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_SLICE_ENABLE,		/* _MMIO(0x9138) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ats_m_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg pvc_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_1270_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_2000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_3000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	MIRROR_L3BANK_ENABLE,		/* _MMIO(0x9130) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};
185 
186 static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
187 {
188 	const struct xe_reg *regs;
189 
190 	if (GRAPHICS_VERx100(xe) >= 3000) {
191 		*count = ARRAY_SIZE(ver_3000_runtime_regs);
192 		regs = ver_3000_runtime_regs;
193 	} else if (GRAPHICS_VERx100(xe) >= 2000) {
194 		*count = ARRAY_SIZE(ver_2000_runtime_regs);
195 		regs = ver_2000_runtime_regs;
196 	} else if (GRAPHICS_VERx100(xe) >= 1270) {
197 		*count = ARRAY_SIZE(ver_1270_runtime_regs);
198 		regs = ver_1270_runtime_regs;
199 	} else if (GRAPHICS_VERx100(xe) == 1260) {
200 		*count = ARRAY_SIZE(pvc_runtime_regs);
201 		regs = pvc_runtime_regs;
202 	} else if (GRAPHICS_VERx100(xe) == 1255) {
203 		*count = ARRAY_SIZE(ats_m_runtime_regs);
204 		regs = ats_m_runtime_regs;
205 	} else if (GRAPHICS_VERx100(xe) == 1200) {
206 		*count = ARRAY_SIZE(tgl_runtime_regs);
207 		regs = tgl_runtime_regs;
208 	} else {
209 		regs = ERR_PTR(-ENOPKG);
210 		*count = 0;
211 	}
212 
213 	return regs;
214 }
215 
/*
 * One-time allocation of the buffer that will hold the snapshotted values
 * of this platform's runtime registers (the register list itself is a
 * static const table picked by pick_runtime_regs()).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int pf_alloc_runtime_info(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	/* must be called once, before any runtime info was set up */
	xe_gt_assert(gt, IS_SRIOV_PF(xe));
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);

	regs = pick_runtime_regs(xe, &size);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* nothing to allocate when the platform has no runtime registers */
	if (unlikely(!size))
		return 0;

	/* drmm_kcalloc ties the buffer lifetime to the drm device */
	values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
	if (!values)
		return -ENOMEM;

	gt->sriov.pf.service.runtime.size = size;
	gt->sriov.pf.service.runtime.regs = regs;
	gt->sriov.pf.service.runtime.values = values;

	return 0;
}
245 
246 static void read_many(struct xe_gt *gt, unsigned int count,
247 		      const struct xe_reg *regs, u32 *values)
248 {
249 	while (count--)
250 		*values++ = xe_mmio_read32(&gt->mmio, *regs++);
251 }
252 
253 static void pf_prepare_runtime_info(struct xe_gt *gt)
254 {
255 	const struct xe_reg *regs;
256 	unsigned int size;
257 	u32 *values;
258 
259 	if (!gt->sriov.pf.service.runtime.size)
260 		return;
261 
262 	size = gt->sriov.pf.service.runtime.size;
263 	regs = gt->sriov.pf.service.runtime.regs;
264 	values = gt->sriov.pf.service.runtime.values;
265 
266 	read_many(gt, size, regs, values);
267 
268 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
269 		struct drm_printer p = xe_gt_info_printer(gt);
270 
271 		xe_gt_sriov_pf_service_print_runtime(gt, &p);
272 	}
273 }
274 
/**
 * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
 * @gt: the &xe_gt to initialize
 *
 * Performs early initialization of the GT SR-IOV PF services, including preparation
 * of the runtime info that will be shared with VFs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
	int err;

	pf_init_versions(gt);

	err = pf_alloc_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	return 0;
failed:
	xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));
	return err;
}
299 
/**
 * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
 * @gt: the &xe_gt to update
 *
 * Updates runtime data shared with VFs by re-reading the snapshotted
 * runtime register values.
 *
 * This function can be called more than once.
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
{
	pf_prepare_runtime_info(gt);
}
313 
/**
 * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * Reset a VF driver negotiated VF/PF ABI version (back to 0.0, meaning
 * "not negotiated").  After that point, the VF driver will have to perform
 * new version handshake to continue use of the PF services again.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
{
	pf_disconnect(gt, vfid);
}
329 
330 /* Return: 0 on success or a negative error code on failure. */
331 static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
332 				u32 wanted_major, u32 wanted_minor,
333 				u32 *major, u32 *minor)
334 {
335 	int err;
336 
337 	xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
338 				vfid, wanted_major, wanted_minor);
339 
340 	err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);
341 
342 	if (err < 0) {
343 		xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
344 				   vfid, wanted_major, wanted_minor, ERR_PTR(err));
345 		pf_disconnect(gt, vfid);
346 	} else {
347 		xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
348 				vfid, *major, *minor);
349 		pf_connect(gt, vfid, *major, *minor);
350 	}
351 
352 	return 0;
353 }
354 
/*
 * Handle a VF2PF_HANDSHAKE request: validate the message, negotiate the
 * ABI version and build the success response in @response.
 *
 * Return: length of the response message or a negative error code on failure.
 */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
				    const u32 *request, u32 len, u32 *response, u32 size)
{
	u32 wanted_major, wanted_minor;
	u32 major, minor;
	u32 mbz;
	int err;

	/* the request must be exactly VF2PF_HANDSHAKE_REQUEST_MSG_LEN dwords */
	if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
		return -EMSGSIZE;

	/* reserved (must-be-zero) bits in dw0 must indeed be zero */
	mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
	if (unlikely(mbz))
		return -EPFNOSUPPORT;

	wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
	wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);

	err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
	if (err < 0)
		return err;

	/* the negotiated version must not be 0.0 and must fit the buffer */
	xe_gt_assert(gt, major || minor);
	xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
	response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
		      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);

	return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
}
389 
/* Wire layout of one register entry in a QUERY_RUNTIME response (2 dwords). */
struct reg_data {
	u32 offset;
	u32 value;
} __packed;
static_assert(hxg_sizeof(struct reg_data) == 2);
395 
/*
 * Copy up to @limit runtime register entries, starting at index @start,
 * into @data and report via @remaining how many entries are left beyond
 * this chunk.
 *
 * Return: number of entries copied or negative error code on failure.
 */
static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
				    struct reg_data *data, u32 *remaining)
{
	struct xe_gt_sriov_pf_service_runtime_regs *runtime;
	unsigned int count, i;
	u32 addr;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	runtime = &gt->sriov.pf.service.runtime;

	/* start == size is allowed and yields an empty reply (count == 0) */
	if (start > runtime->size)
		return -ERANGE;

	/* copy at most @limit entries, fewer if near the end of the table */
	count = min_t(u32, runtime->size - start, limit);

	for (i = 0; i < count; ++i, ++data) {
		addr = runtime->regs[start + i].addr;
		/* expose the adjusted register address alongside its value */
		data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
		data->value = runtime->values[start + i];
	}

	*remaining = runtime->size - start - count;
	return count;
}
422 
/*
 * Handle a VF2PF_QUERY_RUNTIME request: validate the message, copy a chunk
 * of runtime register data into the response buffer and build the header.
 *
 * Return: length of the response message or a negative error code on failure.
 */
static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
					const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
{
	const u32 chunk_size = hxg_sizeof(struct reg_data);
	struct reg_data *reg_data_buf;
	u32 limit, start, max_chunks;
	u32 remaining = 0;
	int ret;

	/* the VF must have completed at least a 1.0 ABI handshake first */
	if (!pf_is_negotiated(gt, origin, 1, 0))
		return -EACCES;
	if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EMSGSIZE;
	if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EPROTO;
	if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
		return -EINVAL;

	limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
	start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);

	/* clamp the reply to the ABI maximum, then to the VF's own limit */
	resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
	max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
	limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
	/* reg_data entries are packed right after the fixed response header */
	reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);

	ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
	if (ret < 0)
		return ret;

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
	response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);

	return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
}
461 
/**
 * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
 * @gt: the &xe_gt that provides the service
 * @origin: VF number that is requesting the service
 * @msg: request message
 * @msg_len: length of the request message (in dwords)
 * @response: placeholder for the response message
 * @resp_size: length of the response message buffer (in dwords)
 *
 * This function processes `Relay Message`_ request from the VF.
 *
 * Return: length of the response message or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
					   const u32 *msg, u32 msg_len,
					   u32 *response, u32 resp_size)
{
	u32 action, data __maybe_unused;
	int ret;

	/* caller guarantees a well-formed HXG request header */
	xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);

	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
	xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
				action, data, origin);

	/* dispatch by relay action; unknown actions are rejected */
	switch (action) {
	case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
		ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
		ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
504 
505 /**
506  * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
507  * @gt: the &xe_gt
508  * @p: the &drm_printer
509  *
510  * This function is for PF use only.
511  */
512 int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
513 {
514 	const struct xe_reg *regs;
515 	unsigned int size;
516 	u32 *values;
517 
518 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
519 
520 	size = gt->sriov.pf.service.runtime.size;
521 	regs = gt->sriov.pf.service.runtime.regs;
522 	values = gt->sriov.pf.service.runtime.values;
523 
524 	for (; size--; regs++, values++) {
525 		drm_printf(p, "reg[%#x] = %#x\n",
526 			   xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
527 	}
528 
529 	return 0;
530 }
531 
532 /**
533  * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
534  * @gt: the &xe_gt
535  * @p: the &drm_printer
536  *
537  * This function is for PF use only.
538  */
539 int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
540 {
541 	struct xe_device *xe = gt_to_xe(gt);
542 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
543 	struct xe_gt_sriov_pf_service_version *version;
544 
545 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
546 
547 	for (n = 1; n <= total_vfs; n++) {
548 		version = &gt->sriov.pf.vfs[n].version;
549 		if (!version->major && !version->minor)
550 			continue;
551 
552 		drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
553 	}
554 
555 	return 0;
556 }
557 
558 #if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
559 #include "tests/xe_gt_sriov_pf_service_test.c"
560 #endif
561