// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_relay_actions_abi.h"

#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_pf_service_types.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_mmio.h"
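/* Seed the per-GT base and latest supported VF/PF ABI versions. */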
static void pf_init_versions(struct xe_gt *gt)
{
	BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR);
	BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR);

	/* base versions may differ between platforms */
	gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR;
	gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR;

	/* latest version is same for all platforms */
	gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR;
	gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR;
}

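/*
 * Negotiation policy, illustrated with base 1.0 and latest 1.9
 * (example numbers only):
 *
 *   VF wants ANY.ANY -> 1.9 (grant our latest)
 *   VF wants 2.5     -> 1.9 (newer major than ours, grant our latest)
 *   VF wants 1.5     -> 1.5 (same major, grant the common minor)
 *   VF wants 0.9     -> rejected (older than the supported base)
 */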
/* Return: 0 on success or a negative error code on failure. */
static int pf_negotiate_version(struct xe_gt *gt,
				u32 wanted_major, u32 wanted_minor,
				u32 *major, u32 *minor)
{
	struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base;
	struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest;

	xe_gt_assert(gt, base.major);
	xe_gt_assert(gt, base.major <= latest.major);
	xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor));

	/* VF doesn't care - return our latest */
	if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY &&
	    wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) {
		*major = latest.major;
		*minor = latest.minor;
		return 0;
	}

	/* VF wants newer than our latest - return our latest */
	if (wanted_major > latest.major) {
		*major = latest.major;
		*minor = latest.minor;
		return 0;
	}

	/* VF wants older than min required - reject */
	if (wanted_major < base.major ||
	    (wanted_major == base.major && wanted_minor < base.minor)) {
		return -EPERM;
	}

	/* previous major - return wanted, as we should still support it */
	if (wanted_major < latest.major) {
		/* XXX: we are not prepared for multi-versions yet */
		xe_gt_assert(gt, base.major == latest.major);
		return -ENOPKG;
	}

	/* same major - return common minor */
	*major = wanted_major;
	*minor = min_t(u32, latest.minor, wanted_minor);
	return 0;
}

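/* Store the negotiated ABI version, marking the VF as connected. */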
static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
{
	xe_gt_sriov_pf_assert_vfid(gt, vfid);
	xe_gt_assert(gt, major || minor);

	gt->sriov.pf.vfs[vfid].version.major = major;
	gt->sriov.pf.vfs[vfid].version.minor = minor;
}

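/* Clear the negotiated ABI version, forcing the VF to handshake again. */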
static void pf_disconnect(struct xe_gt *gt, u32 vfid)
{
	xe_gt_sriov_pf_assert_vfid(gt, vfid);

	gt->sriov.pf.vfs[vfid].version.major = 0;
	gt->sriov.pf.vfs[vfid].version.minor = 0;
}

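/* Return: true if the VF negotiated major @major and at least minor @minor. */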
static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor)
{
	xe_gt_sriov_pf_assert_vfid(gt, vfid);

	return major == gt->sriov.pf.vfs[vfid].version.major &&
	       minor <= gt->sriov.pf.vfs[vfid].version.minor;
}

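/*
 * Per-platform tables of fuse and configuration registers whose values the PF
 * snapshots and shares with the VFs, which cannot read them directly.
 */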
static const struct xe_reg tgl_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_SLICE_ENABLE,		/* _MMIO(0x9138) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ats_m_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg pvc_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_1270_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_2000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

static const struct xe_reg ver_3000_runtime_regs[] = {
	RPM_CONFIG0,			/* _MMIO(0x0d00) */
	XEHP_FUSE4,			/* _MMIO(0x9114) */
	MIRROR_FUSE3,			/* _MMIO(0x9118) */
	MIRROR_FUSE1,			/* _MMIO(0x911c) */
	MIRROR_L3BANK_ENABLE,		/* _MMIO(0x9130) */
	XELP_EU_ENABLE,			/* _MMIO(0x9134) */
	XELP_GT_GEOMETRY_DSS_ENABLE,	/* _MMIO(0x913c) */
	GT_VEBOX_VDBOX_DISABLE,		/* _MMIO(0x9140) */
	XEHP_GT_COMPUTE_DSS_ENABLE,	/* _MMIO(0x9144) */
	XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
	XE2_GT_COMPUTE_DSS_2,		/* _MMIO(0x914c) */
	XE2_GT_GEOMETRY_DSS_1,		/* _MMIO(0x9150) */
	XE2_GT_GEOMETRY_DSS_2,		/* _MMIO(0x9154) */
	CTC_MODE,			/* _MMIO(0xa26c) */
	HUC_KERNEL_LOAD_INFO,		/* _MMIO(0xc1dc) */
};

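/* Pick the runtime register table matching the platform's graphics IP version. */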
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
	const struct xe_reg *regs;

	if (GRAPHICS_VERx100(xe) >= 3000) {
		*count = ARRAY_SIZE(ver_3000_runtime_regs);
		regs = ver_3000_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) >= 2000) {
		*count = ARRAY_SIZE(ver_2000_runtime_regs);
		regs = ver_2000_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) >= 1270) {
		*count = ARRAY_SIZE(ver_1270_runtime_regs);
		regs = ver_1270_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1260) {
		*count = ARRAY_SIZE(pvc_runtime_regs);
		regs = pvc_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1255) {
		*count = ARRAY_SIZE(ats_m_runtime_regs);
		regs = ats_m_runtime_regs;
	} else if (GRAPHICS_VERx100(xe) == 1200) {
		*count = ARRAY_SIZE(tgl_runtime_regs);
		regs = tgl_runtime_regs;
	} else {
		regs = ERR_PTR(-ENOPKG);
		*count = 0;
	}

	return regs;
}

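/* Allocate a drm-managed buffer for the snapshotted runtime register values. */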
static int pf_alloc_runtime_info(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.size);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.regs);
	xe_gt_assert(gt, !gt->sriov.pf.service.runtime.values);

	regs = pick_runtime_regs(xe, &size);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	if (unlikely(!size))
		return 0;

	values = drmm_kcalloc(&xe->drm, size, sizeof(u32), GFP_KERNEL);
	if (!values)
		return -ENOMEM;

	gt->sriov.pf.service.runtime.size = size;
	gt->sriov.pf.service.runtime.regs = regs;
	gt->sriov.pf.service.runtime.values = values;

	return 0;
}

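/* Read @count registers from @regs into @values. */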
static void read_many(struct xe_gt *gt, unsigned int count,
		      const struct xe_reg *regs, u32 *values)
{
	while (count--)
		*values++ = xe_mmio_read32(&gt->mmio, *regs++);
}

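/* Snapshot the current values of all runtime registers shared with the VFs. */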
static void pf_prepare_runtime_info(struct xe_gt *gt)
{
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	if (!gt->sriov.pf.service.runtime.size)
		return;

	size = gt->sriov.pf.service.runtime.size;
	regs = gt->sriov.pf.service.runtime.regs;
	values = gt->sriov.pf.service.runtime.values;

	read_many(gt, size, regs, values);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		xe_gt_sriov_pf_service_print_runtime(gt, &p);
	}
}

/**
 * xe_gt_sriov_pf_service_init - Early initialization of the GT SR-IOV PF services.
 * @gt: the &xe_gt to initialize
 *
 * Performs early initialization of the GT SR-IOV PF services, including preparation
 * of the runtime info that will be shared with VFs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_init(struct xe_gt *gt)
{
	int err;

	pf_init_versions(gt);

	err = pf_alloc_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	return 0;
failed:
	xe_gt_sriov_err(gt, "Failed to initialize service (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_pf_service_update - Update PF SR-IOV services.
 * @gt: the &xe_gt to update
 *
 * Updates runtime data shared with VFs.
 *
 * This function can be called more than once.
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_update(struct xe_gt *gt)
{
	pf_prepare_runtime_info(gt);
}

/**
 * xe_gt_sriov_pf_service_reset - Reset a connection with the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * Reset the VF/PF ABI version previously negotiated by the VF driver.
 * After that point, the VF driver must perform a new version handshake
 * before it can continue using the PF services.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid)
{
	pf_disconnect(gt, vfid);
}

/* Return: 0 on success or a negative error code on failure. */
static int pf_process_handshake(struct xe_gt *gt, u32 vfid,
				u32 wanted_major, u32 wanted_minor,
				u32 *major, u32 *minor)
{
	int err;

	xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n",
				vfid, wanted_major, wanted_minor);

	err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor);

	if (err < 0) {
		xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n",
				   vfid, wanted_major, wanted_minor, ERR_PTR(err));
		pf_disconnect(gt, vfid);
	} else {
		xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n",
				vfid, *major, *minor);
		pf_connect(gt, vfid, *major, *minor);
	}

	return err;
}

/* Return: length of the response message or a negative error code on failure. */
static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin,
				    const u32 *request, u32 len, u32 *response, u32 size)
{
	u32 wanted_major, wanted_minor;
	u32 major, minor;
	u32 mbz;
	int err;

	if (unlikely(len != VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
		return -EMSGSIZE;

	mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, request[0]);
	if (unlikely(mbz))
		return -EPFNOSUPPORT;

	wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]);
	wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]);

	err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor);
	if (err < 0)
		return err;

	xe_gt_assert(gt, major || minor);
	xe_gt_assert(gt, size >= VF2PF_HANDSHAKE_RESPONSE_MSG_LEN);

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
	response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) |
		      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor);

	return VF2PF_HANDSHAKE_RESPONSE_MSG_LEN;
}

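/*
 * Wire format of a single runtime register entry in the VF2PF_QUERY_RUNTIME
 * response: one dword with the register offset, one dword with its value.
 */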
struct reg_data {
	u32 offset;
	u32 value;
} __packed;
static_assert(hxg_sizeof(struct reg_data) == 2);

/* Return: number of entries copied or negative error code on failure. */
static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
				    struct reg_data *data, u32 *remaining)
{
	struct xe_gt_sriov_pf_service_runtime_regs *runtime;
	unsigned int count, i;
	u32 addr;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	runtime = &gt->sriov.pf.service.runtime;

	if (start > runtime->size)
		return -ERANGE;

	count = min_t(u32, runtime->size - start, limit);

	for (i = 0; i < count; ++i, ++data) {
		addr = runtime->regs[start + i].addr;
		data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
		data->value = runtime->values[start + i];
	}

	*remaining = runtime->size - start - count;
	return count;
}

/* Return: length of the response message or a negative error code on failure. */
static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin,
					const u32 *msg, u32 msg_len, u32 *response, u32 resp_size)
{
	const u32 chunk_size = hxg_sizeof(struct reg_data);
	struct reg_data *reg_data_buf;
	u32 limit, start, max_chunks;
	u32 remaining = 0;
	int ret;

	if (!pf_is_negotiated(gt, origin, 1, 0))
		return -EACCES;
	if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EMSGSIZE;
	if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
		return -EPROTO;
	if (unlikely(resp_size < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN))
		return -EINVAL;

	limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
	start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);

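	/* Clamp the number of returned entries to the response buffer and the VF's limit. */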
	resp_size = min_t(u32, resp_size, VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN);
	max_chunks = (resp_size - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / chunk_size;
	limit = limit == VF2PF_QUERY_RUNTIME_NO_LIMIT ? max_chunks : min_t(u32, max_chunks, limit);
	reg_data_buf = (void *)(response + VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN);

	ret = pf_service_runtime_query(gt, start, limit, reg_data_buf, &remaining);
	if (ret < 0)
		return ret;

	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
		      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, ret);
	response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, remaining);

	return VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + ret * hxg_sizeof(struct reg_data);
}

/**
 * xe_gt_sriov_pf_service_process_request - Service GT level SR-IOV request message from the VF.
 * @gt: the &xe_gt that provides the service
 * @origin: VF number that is requesting the service
 * @msg: request message
 * @msg_len: length of the request message (in dwords)
 * @response: placeholder for the response message
 * @resp_size: length of the response message buffer (in dwords)
 *
 * This function processes `Relay Message`_ request from the VF.
 *
 * Return: length of the response message or a negative error code on failure.
 */
int xe_gt_sriov_pf_service_process_request(struct xe_gt *gt, u32 origin,
					   const u32 *msg, u32 msg_len,
					   u32 *response, u32 resp_size)
{
	u32 action, data __maybe_unused;
	int ret;

	xe_gt_assert(gt, msg_len >= GUC_HXG_MSG_MIN_LEN);
	xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST);

	action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
	data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]);
	xe_gt_sriov_dbg_verbose(gt, "service action %#x:%u from VF%u\n",
				action, data, origin);

	switch (action) {
	case GUC_RELAY_ACTION_VF2PF_HANDSHAKE:
		ret = pf_process_handshake_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	case GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME:
		ret = pf_process_runtime_query_msg(gt, origin, msg, msg_len, response, resp_size);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xe_gt_sriov_pf_service_print_runtime - Print PF runtime data shared with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	const struct xe_reg *regs;
	unsigned int size;
	u32 *values;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = gt->sriov.pf.service.runtime.size;
	regs = gt->sriov.pf.service.runtime.regs;
	values = gt->sriov.pf.service.runtime.values;

	for (; size--; regs++, values++) {
		drm_printf(p, "reg[%#x] = %#x\n",
			   xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for PF use only.
 *
 * Return: always 0.
 */
int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
	struct xe_gt_sriov_pf_service_version *version;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	for (n = 1; n <= total_vfs; n++) {
		version = &gt->sriov.pf.vfs[n].version;
		if (!version->major && !version->minor)
			continue;

		drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor);
	}

	return 0;
}

#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_gt_sriov_pf_service_test.c"
#endif