// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/string_helpers.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_communication_mmio_abi.h"
#include "abi/guc_klvs_abi.h"
#include "abi/guc_relay_actions_abi.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
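
/*
 * For example, make_u64_from_u32(0x11223344, 0x55667788) evaluates to
 * 0x1122334455667788; the 64-bit KLV queries below use it to reassemble
 * values delivered as two 32-bit response dwords.
 */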

static int guc_action_vf_reset(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}
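
/*
 * Note: VF2GUC_VF_RESET carries no payload, so the request above is a single
 * HXG header dword (ORIGIN=HOST, TYPE=REQUEST plus the action code); any
 * positive return from xe_guc_mmio_send(), i.e. unexpected response data,
 * is mapped to -EPROTO.
 */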

#define GUC_RESET_VF_STATE_RETRY_MAX	10
static int vf_reset_guc_state(struct xe_gt *gt)
{
	unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	do {
		err = guc_action_vf_reset(guc);
		if (!err || err != -ETIMEDOUT)
			break;
	} while (--retry);

	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_vf_reset - Reset GuC VF internal state.
 * @gt: the &xe_gt
 *
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_reset(struct xe_gt *gt)
{
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	return vf_reset_guc_state(gt);
}

static int guc_action_match_version(struct xe_guc *guc,
				    struct xe_uc_fw_version *wanted,
				    struct xe_uc_fw_version *found)
{
	u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_MATCH_VERSION),
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) |
		FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	int ret;

	BUILD_BUG_ON(VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN > GUC_MAX_MMIO_MSG_LEN);

	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	memset(found, 0, sizeof(struct xe_uc_fw_version));
	found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
	found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
	found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
	found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);

	return 0;
}

static int guc_action_match_version_any(struct xe_guc *guc,
					struct xe_uc_fw_version *found)
{
	struct xe_uc_fw_version wanted = {
		.branch = GUC_VERSION_BRANCH_ANY,
		.major = GUC_VERSION_MAJOR_ANY,
		.minor = GUC_VERSION_MINOR_ANY,
		.patch = 0
	};

	return guc_action_match_version(guc, &wanted, found);
}

static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
	struct xe_device *xe = gt_to_xe(gt);

	memset(ver, 0, sizeof(struct xe_uc_fw_version));

	switch (xe->info.platform) {
	case XE_TIGERLAKE ... XE_PVC:
		/* 1.1 is the current baseline for the Xe driver */
		ver->branch = 0;
		ver->major = 1;
		ver->minor = 1;
		break;
	default:
		/* 1.2 adds support for the GMD_ID KLV */
		ver->branch = 0;
		ver->major = 1;
		ver->minor = 2;
		break;
	}
}

static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
	/* for now it's the same as minimum */
	return vf_minimum_guc_version(gt, ver);
}

static int vf_handshake_with_guc(struct xe_gt *gt)
{
	struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_uc_fw_version wanted = {0};
	struct xe_guc *guc = &gt->uc.guc;
	bool old = false;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* select wanted version - prefer previous (if any) */
	if (guc_version->major || guc_version->minor) {
		wanted = *guc_version;
		old = true;
	} else {
		vf_wanted_guc_version(gt, &wanted);
		xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);

		/* First time we handshake, so record the minimum wanted */
		gt->sriov.vf.wanted_guc_version = wanted;
	}

	err = guc_action_match_version(guc, &wanted, guc_version);
	if (unlikely(err))
		goto fail;

	if (old) {
		/* we don't support interface version change */
		if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) {
			xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
					guc_version->branch, guc_version->major,
					guc_version->minor, guc_version->patch);
			xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
					 wanted.branch, wanted.major,
					 wanted.minor, wanted.patch);
			err = -EREMCHG;
			goto fail;
		} else {
			/* version is unchanged, no need to re-verify it */
			return 0;
		}
	}

	/* illegal */
	if (guc_version->major > wanted.major) {
		err = -EPROTO;
		goto unsupported;
	}

	/* there's no fallback on major version. */
	if (guc_version->major != wanted.major) {
		err = -ENOPKG;
		goto unsupported;
	}

	/* check against minimum version supported by us */
	vf_minimum_guc_version(gt, &wanted);
	xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
	if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) {
		err = -ENOKEY;
		goto unsupported;
	}

	xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
			guc_version->branch, guc_version->major,
			guc_version->minor, guc_version->patch);

	return 0;

unsupported:
	xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
			guc_version->branch, guc_version->major,
			guc_version->minor, guc_version->patch,
			ERR_PTR(err));
fail:
	xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
			wanted.major, wanted.minor, ERR_PTR(err));

	/* try again with *any* just to query which version is supported */
	if (!guc_action_match_version_any(guc, &wanted))
		xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
				   wanted.branch, wanted.major, wanted.minor, wanted.patch);
	return err;
}
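
/*
 * Per the checks above: a reported major above the wanted one yields -EPROTO,
 * any other major mismatch yields -ENOPKG (there is no cross-major fallback),
 * and a matching major that still falls below vf_minimum_guc_version() yields
 * -ENOKEY; e.g. wanting 0.1.2 but getting 0.1.1 back fails with -ENOKEY on
 * platforms where 1.2 is the minimum.
 */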

/**
 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	err = vf_handshake_with_guc(gt);
	if (unlikely(err))
		return err;

	return 0;
}
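
/*
 * A minimal sketch of the expected call order on the VF (assuming the usual
 * driver init flow; illustrative only, error handling elided):
 *
 *	err = xe_gt_sriov_vf_bootstrap(gt);		// reset + GuC ABI handshake
 *	if (!err)
 *		err = xe_gt_sriov_vf_query_config(gt);	// GGTT/LMEM/CTXs/DBs
 *	if (!err)
 *		err = xe_gt_sriov_vf_connect(gt);	// VF/PF relay handshake
 *	if (!err)
 *		err = xe_gt_sriov_vf_query_runtime(gt);	// cached register values
 */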

/**
 * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions.
 * @gt: the &xe_gt
 * @wanted: pointer to the &xe_uc_fw_version to be filled with the wanted version
 * @found: pointer to the &xe_uc_fw_version to be filled with the found version
 *
 * This function is for VF use only and it can only be used after a successful
 * version handshake with the GuC.
 */
void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
				 struct xe_uc_fw_version *wanted,
				 struct xe_uc_fw_version *found)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);

	if (wanted)
		*wanted = gt->sriov.vf.wanted_guc_version;

	if (found)
		*found = gt->sriov.vf.guc_version;
}

static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
	u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
	};
	int ret;

	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

/**
 * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * Return: 0 if the operation completed successfully, or a negative error
 * code otherwise.
 */
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
{
	struct xe_guc *guc = &gt->uc.guc;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_vf_notify_resfix_done(guc);
	if (unlikely(err))
		xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
				ERR_PTR(err));
	else
		xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");

	return err;
}

static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
				       u32 *value, u32 value_len)
{
	u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV),
		FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key),
	};
	u32 response[GUC_MAX_MMIO_MSG_LEN];
	u32 length;
	int ret;

	BUILD_BUG_ON(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN > GUC_MAX_MMIO_MSG_LEN);
	ret = xe_guc_mmio_send_recv(guc, request, ARRAY_SIZE(request), response);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]);
	if (unlikely(length > value_len))
		return -EOVERFLOW;
	if (unlikely(length < value_len))
		return -ENODATA;

	switch (value_len) {
	default:
		xe_gt_WARN_ON(guc_to_gt(guc), value_len > 3);
		fallthrough;
	case 3:
		value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]);
		fallthrough;
	case 2:
		value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]);
		fallthrough;
	case 1:
		value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]);
		fallthrough;
	case 0:
		break;
	}

	return 0;
}

static int guc_action_query_single_klv32(struct xe_guc *guc, u32 key, u32 *value32)
{
	return guc_action_query_single_klv(guc, key, value32, hxg_sizeof(u32));
}

static int guc_action_query_single_klv64(struct xe_guc *guc, u32 key, u64 *value64)
{
	u32 value[2];
	int err;

	err = guc_action_query_single_klv(guc, key, value, hxg_sizeof(value));
	if (unlikely(err))
		return err;

	*value64 = make_u64_from_u32(value[1], value[0]);
	return 0;
}
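
/*
 * Layout of a 64-bit KLV reply as consumed above: response[0] carries the
 * LENGTH field (2 dwords here), response[1] holds the lower 32 bits and
 * response[2] the upper 32 bits, recombined via make_u64_from_u32().
 */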

static bool has_gmdid(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1270;
}

/**
 * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: value of GMDID KLV on success or 0 on failure.
 */
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{
	const char *type = xe_gt_is_media_type(gt) ? "media" : "graphics";
	struct xe_guc *guc = &gt->uc.guc;
	u32 value;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !GRAPHICS_VERx100(gt_to_xe(gt)) || has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major > 1 || gt->sriov.vf.guc_version.minor >= 2);

	err = guc_action_query_single_klv32(guc, GUC_KLV_GLOBAL_CFG_GMD_ID_KEY, &value);
	if (unlikely(err)) {
		xe_gt_sriov_err(gt, "Failed to obtain %s GMDID (%pe)\n",
				type, ERR_PTR(err));
		return 0;
	}

	xe_gt_sriov_dbg(gt, "%s GMDID = %#x\n", type, value);
	return value;
}

static int vf_get_ggtt_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u64 start, size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->ggtt_size && config->ggtt_size != size) {
		xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
				size / SZ_1K, config->ggtt_size / SZ_1K);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
				start, start + size - 1, size / SZ_1K);

	config->ggtt_shift = start - (s64)config->ggtt_base;
	config->ggtt_base = start;
	config->ggtt_size = size;

	return config->ggtt_size ? 0 : -ENODATA;
}
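
/*
 * Worked example (hypothetical numbers): if the VF GGTT block used to start
 * at 0x01000000 and, after migration to another PF, an equally sized block
 * now starts at 0x01800000, the code above records ggtt_shift = 0x00800000
 * so that fixups can rebase existing GGTT nodes; the shift may equally be
 * negative.
 */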

static int vf_get_lmem_info(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	char size_str[10];
	u64 size;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, &size);
	if (unlikely(err))
		return err;

	if (config->lmem_size && config->lmem_size != size) {
		xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
				size / SZ_1M, config->lmem_size / SZ_1M);
		return -EREMCHG;
	}

	string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
	xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);

	config->lmem_size = size;

	return config->lmem_size ? 0 : -ENODATA;
}

static int vf_get_submission_cfg(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_guc *guc = &gt->uc.guc;
	u32 num_ctxs, num_dbs;
	int err;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs);
	if (unlikely(err))
		return err;

	err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs);
	if (unlikely(err))
		return err;

	if (config->num_ctxs && config->num_ctxs != num_ctxs) {
		xe_gt_sriov_err(gt, "Unexpected CTXs reassignment: %u != %u\n",
				num_ctxs, config->num_ctxs);
		return -EREMCHG;
	}
	if (config->num_dbs && config->num_dbs != num_dbs) {
		xe_gt_sriov_err(gt, "Unexpected DBs reassignment: %u != %u\n",
				num_dbs, config->num_dbs);
		return -EREMCHG;
	}

	xe_gt_sriov_dbg_verbose(gt, "CTXs %u DBs %u\n", num_ctxs, num_dbs);

	config->num_ctxs = num_ctxs;
	config->num_dbs = num_dbs;

	return config->num_ctxs ? 0 : -ENODATA;
}

static void vf_cache_gmdid(struct xe_gt *gt)
{
	xe_gt_assert(gt, has_gmdid(gt_to_xe(gt)));
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	gt->sriov.vf.runtime.gmdid = xe_gt_sriov_vf_gmdid(gt);
}

/**
 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	err = vf_get_ggtt_info(gt);
	if (unlikely(err))
		return err;

	if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
		err = vf_get_lmem_info(gt);
		if (unlikely(err))
			return err;
	}

	err = vf_get_submission_cfg(gt);
	if (unlikely(err))
		return err;

	if (has_gmdid(xe))
		vf_cache_gmdid(gt);

	return 0;
}

/**
 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: number of GuC context IDs assigned to VF.
 */
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);

	return gt->sriov.vf.self_config.num_ctxs;
}

/**
 * xe_gt_sriov_vf_lmem - VF LMEM configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to VF.
 */
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);

	return gt->sriov.vf.self_config.lmem_size;
}

/**
 * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the GGTT assigned to VF.
 */
u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);

	return gt->sriov.vf.self_config.ggtt_size;
}

/**
 * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: base offset of the GGTT assigned to VF.
 */
u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);

	return gt->sriov.vf.self_config.ggtt_base;
}

/**
 * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration.
 * @gt: the &xe_gt struct instance
 *
 * This function is for VF use only.
 *
 * Return: the shift value; may be negative.
 */
s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, xe_gt_is_main_type(gt));

	return config->ggtt_shift;
}

static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
	u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_RELAY_ACTION_VF2PF_HANDSHAKE),
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) |
		FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor),
	};
	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN))
		return -EPROTO;

	if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0])))
		return -EPROTO;

	*major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]);
	*minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]);

	return 0;
}

static void vf_connect_pf(struct xe_device *xe, u16 major, u16 minor)
{
	xe_assert(xe, IS_SRIOV_VF(xe));

	xe->sriov.vf.pf_version.major = major;
	xe->sriov.vf.pf_version.minor = minor;
}

static void vf_disconnect_pf(struct xe_device *xe)
{
	vf_connect_pf(xe, 0, 0);
}

static int vf_handshake_with_pf(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR;
	u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR;
	u32 major = major_wanted, minor = minor_wanted;
	int err;

	err = relay_action_handshake(gt, &major, &minor);
	if (unlikely(err))
		goto failed;

	if (!major && !minor) {
		err = -ENODATA;
		goto failed;
	}

	xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor);
	vf_connect_pf(xe, major, minor);
	return 0;

failed:
	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
			major, minor, ERR_PTR(err));
	vf_disconnect_pf(xe);
	return err;
}
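
/*
 * The VF always asks for the latest VF/PF ABI it knows about; the PF is
 * expected to reply with the version it actually supports (possibly lower),
 * and a 0.0 reply is treated above as no usable ABI (-ENODATA).
 */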

/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err;

	err = vf_handshake_with_pf(gt);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
 *   or just mark that a GuC is ready for it.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * This function shall be called only by VF.
 */
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
	/*
	 * We need to be certain that if all flags were set, at least one
	 * thread will notice that and schedule the recovery.
	 */
	smp_mb__after_atomic();

	xe_gt_sriov_info(gt, "ready for recovery after migration\n");
	xe_sriov_vf_start_migration_recovery(xe);
}

static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	return major == xe->sriov.vf.pf_version.major &&
	       minor <= xe->sriov.vf.pf_version.minor;
}

static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs)
{
	struct vf_runtime_reg *regs = gt->sriov.vf.runtime.regs;
	unsigned int regs_size = round_up(num_regs, 4);
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	if (regs) {
		if (num_regs <= gt->sriov.vf.runtime.regs_size) {
			memset(regs, 0, num_regs * sizeof(*regs));
			gt->sriov.vf.runtime.num_regs = num_regs;
			return 0;
		}

		drmm_kfree(&xe->drm, regs);
		gt->sriov.vf.runtime.regs = NULL;
		gt->sriov.vf.runtime.num_regs = 0;
		gt->sriov.vf.runtime.regs_size = 0;
	}

	regs = drmm_kcalloc(&xe->drm, regs_size, sizeof(*regs), GFP_KERNEL);
	if (unlikely(!regs))
		return -ENOMEM;

	gt->sriov.vf.runtime.regs = regs;
	gt->sriov.vf.runtime.num_regs = num_regs;
	gt->sriov.vf.runtime.regs_size = regs_size;
	return 0;
}
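
/*
 * Buffer reuse sketch: the allocation is rounded up to a multiple of 4
 * entries, so e.g. shrinking from 14 to 10 registers reuses the existing
 * 16-entry buffer, while growing beyond regs_size releases the old buffer
 * and triggers a fresh drmm_kcalloc().
 */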

static int vf_query_runtime_info(struct xe_gt *gt)
{
	u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN];
	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 32]; /* up to 16 regs */
	u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	u32 count, remaining, num, i;
	u32 start = 0;
	int ret;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, limit);

	/* this is part of the 1.0 PF/VF ABI */
	if (!vf_is_negotiated(gt, 1, 0))
		return -ENOPKG;

	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
				GUC_RELAY_ACTION_VF2PF_QUERY_RUNTIME) |
		     FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit);

repeat:
	request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start);
	ret = xe_guc_relay_send_to_pf(&gt->uc.guc.relay,
				      request, ARRAY_SIZE(request),
				      response, ARRAY_SIZE(response));
	if (unlikely(ret < 0))
		goto failed;

	if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) {
		ret = -EPROTO;
		goto failed;
	}
	if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) {
		ret = -EPROTO;
		goto failed;
	}

	num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
	count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]);
	remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]);

	xe_gt_sriov_dbg_verbose(gt, "count=%u num=%u ret=%d start=%u remaining=%u\n",
				count, num, ret, start, remaining);

	if (unlikely(count != num)) {
		ret = -EPROTO;
		goto failed;
	}

	if (start == 0) {
		ret = vf_prepare_runtime_info(gt, num + remaining);
		if (unlikely(ret < 0))
			goto failed;
	} else if (unlikely(start + num > gt->sriov.vf.runtime.num_regs)) {
		ret = -EPROTO;
		goto failed;
	}

	for (i = 0; i < num; ++i) {
		struct vf_runtime_reg *reg = &gt->sriov.vf.runtime.regs[start + i];

		reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i];
		reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1];
	}

	if (remaining) {
		start += num;
		goto repeat;
	}

	return 0;

failed:
	vf_prepare_runtime_info(gt, 0);
	return ret;
}
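
/*
 * Paging example: the response buffer above fits 16 (offset, value) pairs,
 * so with e.g. 30 runtime registers the first reply returns count=16,
 * remaining=14 and the loop re-sends the request with start=16 to fetch
 * the rest.
 */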

static void vf_show_runtime_info(struct xe_gt *gt)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		xe_gt_sriov_dbg(gt, "runtime(%#x) = %#x\n",
				vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
{
	int err;

	err = vf_query_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		vf_show_runtime_info(gt);

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
			ERR_PTR(err));
	return err;
}

static int vf_runtime_reg_cmp(const void *a, const void *b)
{
	const struct vf_runtime_reg *ra = a;
	const struct vf_runtime_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
{
	struct xe_gt_sriov_vf_runtime *runtime = &gt->sriov.vf.runtime;
	struct vf_runtime_reg key = { .offset = addr };

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	/* bsearch() assumes the PF reported the registers sorted by offset */
	return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
		       vf_runtime_reg_cmp);
}

/**
 * xe_gt_sriov_vf_read32 - Get a register value from the runtime data.
 * @gt: the &xe_gt
 * @reg: the register to read
 *
 * This function is for VF use only.
 * This function shall be called after VF has connected to PF.
 * This function is dedicated for registers that VFs can't read directly.
 *
 * Return: register value obtained from the PF or 0 if not found.
 */
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
	struct vf_runtime_reg *rr;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !reg.vf);

	if (reg.addr == GMD_ID.addr) {
		xe_gt_sriov_dbg_verbose(gt, "gmdid(%#x) = %#x\n",
					addr, gt->sriov.vf.runtime.gmdid);
		return gt->sriov.vf.runtime.gmdid;
	}

	rr = vf_lookup_reg(gt, addr);
	if (!rr) {
		xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
			   "VF is trying to read an inaccessible register %#x+%#x\n",
			   reg.addr, addr - reg.addr);
		return 0;
	}

	xe_gt_sriov_dbg_verbose(gt, "runtime[%#x] = %#x\n", addr, rr->value);
	return rr->value;
}
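
/*
 * Illustrative use (SOME_FUSE_REG is a hypothetical register): a VF that
 * cannot access a fuse register directly would go through the snapshot
 * instead, e.g.:
 *
 *	u32 fuse = xe_gt_sriov_vf_read32(gt, SOME_FUSE_REG);
 *
 * which returns the value captured from the PF during
 * xe_gt_sriov_vf_query_runtime(), or 0 (with a WARN on debug builds) if the
 * register was not part of the runtime snapshot.
 */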

/**
 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
 * @gt: the &xe_gt
 * @reg: the register to write
 * @val: value to write
 *
 * This function is for VF use only.
 * Currently it only triggers a WARN when running on a debug build.
 */
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, !reg.vf);

	/*
	 * In the future, we may want to handle selected writes to inaccessible
	 * registers in some custom way, but for now let's just log a warning
	 * about such an attempt, as we are likely doing something wrong.
	 */
	xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
		   "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
		   val, reg.addr, addr - reg.addr);
}

/**
 * xe_gt_sriov_vf_print_config - Print VF self config.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
	struct xe_device *xe = gt_to_xe(gt);
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
		   config->ggtt_base,
		   config->ggtt_base + config->ggtt_size - 1);

	string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);

	drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);

	if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
		string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
		drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
	}

	drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
	drm_printf(p, "GuC doorbells:\t%u\n", config->num_dbs);
}

/**
 * xe_gt_sriov_vf_print_runtime - Print VF's runtime regs received from PF.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
{
	struct vf_runtime_reg *vf_regs = gt->sriov.vf.runtime.regs;
	unsigned int size = gt->sriov.vf.runtime.num_regs;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	for (; size--; vf_regs++)
		drm_printf(p, "%#x = %#x\n", vf_regs->offset, vf_regs->value);
}

/**
 * xe_gt_sriov_vf_print_version - Print VF ABI versions.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
	struct xe_uc_fw_version *wanted = &gt->sriov.vf.wanted_guc_version;
	struct xe_sriov_vf_relay_version *pf_version = &xe->sriov.vf.pf_version;
	struct xe_uc_fw_version ver;

	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));

	drm_printf(p, "GuC ABI:\n");

	vf_minimum_guc_version(gt, &ver);
	drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor);

	drm_printf(p, "\twanted:\t%u.%u.%u.*\n",
		   wanted->branch, wanted->major, wanted->minor);

	drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
		   guc_version->branch, guc_version->major,
		   guc_version->minor, guc_version->patch);

	drm_printf(p, "PF ABI:\n");

	drm_printf(p, "\tbase:\t%u.%u\n",
		   GUC_RELAY_VERSION_BASE_MAJOR, GUC_RELAY_VERSION_BASE_MINOR);
	drm_printf(p, "\twanted:\t%u.%u\n",
		   GUC_RELAY_VERSION_LATEST_MAJOR, GUC_RELAY_VERSION_LATEST_MINOR);
	drm_printf(p, "\thandshake:\t%u.%u\n",
		   pf_version->major, pf_version->minor);
}