// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include <generated/xe_wa_oob.h>

#include "abi/gsc_mkhi_commands_abi.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
#include "xe_huc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

static int memcpy_fw(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	u32 fw_size = gsc->fw.size;
	void *storage;

	/*
	 * FIXME: xe_migrate_copy does not work with stolen mem yet, so we use
	 * a memcpy for now.
	 */
	storage = kmalloc(fw_size, GFP_KERNEL);
	if (!storage)
		return -ENOMEM;

	xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
	xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
	xe_map_memset(xe, &gsc->private->vmap, fw_size, 0,
		      xe_bo_size(gsc->private) - fw_size);

	kfree(storage);

	return 0;
}

static int emit_gsc_upload(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 offset = xe_bo_ggtt_addr(gsc->private);
	struct xe_bb *bb;
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

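	/*
	 * GSC_FW_LOAD takes the GGTT address of the private BO holding the FW
	 * image, plus its size in 4K pages with the limit-valid bit set.
	 */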
	bb->cs[bb->len++] = GSC_FW_LOAD;
	bb->cs[bb->len++] = lower_32_bits(offset);
	bb->cs[bb->len++] = upper_32_bits(offset);
	bb->cs[bb->len++] = (xe_bo_size(gsc->private) / SZ_4K) |
			    GSC_FW_LOAD_LIMIT_VALID;

	job = xe_bb_create_job(gsc->q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

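/* Helpers for accessing the version query message structs within the BO map */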
#define version_query_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct gsc_get_compatibility_version_in, field_, val_)
#define version_query_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct gsc_get_compatibility_version_out, field_)

static u32 emit_version_query_msg(struct xe_device *xe, struct iosys_map *map, u32 wr_offset)
{
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct gsc_get_compatibility_version_in));

	version_query_wr(xe, map, wr_offset, header.group_id, MKHI_GROUP_ID_GFX_SRV);
	version_query_wr(xe, map, wr_offset, header.command,
			 MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION);

	return wr_offset + sizeof(struct gsc_get_compatibility_version_in);
}

#define GSC_VER_PKT_SZ SZ_4K /* 4K each for input and output */
static int query_compatibility_version(struct xe_gsc *gsc)
{
	struct xe_uc_fw_version *compat = &gsc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	int err;

	bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo)) {
		xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
		return PTR_ERR(bo);
	}

	ggtt_offset = xe_bo_ggtt_addr(bo);

	wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, HECI_MEADDRESS_MKHI, 0,
				       sizeof(struct gsc_get_compatibility_version_in));
	wr_offset = emit_version_query_msg(xe, &bo->vmap, wr_offset);

	err = xe_gsc_pkt_submit_kernel(gsc, ggtt_offset, wr_offset,
				       ggtt_offset + GSC_VER_PKT_SZ,
				       GSC_VER_PKT_SZ);
	if (err) {
		xe_gt_err(gt,
			  "failed to submit GSC request for compatibility version: %d\n",
			  err);
		goto out_bo;
	}

	err = xe_gsc_read_out_header(xe, &bo->vmap, GSC_VER_PKT_SZ,
				     sizeof(struct gsc_get_compatibility_version_out),
				     &rd_offset);
	if (err) {
		xe_gt_err(gt, "invalid GSC reply for version query (err=%d)\n", err);
		goto out_bo;
	}

	compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
	compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
	compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);

	xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);

out_bo:
	xe_bo_unpin_map_no_vm(bo);
	return err;
}

static bool gsc_fw_is_loaded(struct xe_gt *gt)
{
	return xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
			      HECI1_FWSTS1_INIT_COMPLETE;
}

static int gsc_fw_wait(struct xe_gt *gt)
{
	/*
	 * GSC load can take up to 250ms from the moment the instruction is
	 * executed by the GSCCS. To account for possible submission delays or
	 * other issues, we use a 500ms timeout in the wait here.
	 */
	return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
			      HECI1_FWSTS1_INIT_COMPLETE,
			      HECI1_FWSTS1_INIT_COMPLETE,
			      500 * USEC_PER_MSEC, NULL, false);
}

static int gsc_upload(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	/* we should only be here if the init steps were successful */
	xe_assert(xe, xe_uc_fw_is_loadable(&gsc->fw) && gsc->q);

	if (gsc_fw_is_loaded(gt)) {
		xe_gt_err(gt, "GSC already loaded at upload time\n");
		return -EEXIST;
	}

	err = memcpy_fw(gsc);
	if (err) {
		xe_gt_err(gt, "Failed to memcpy GSC FW\n");
		return err;
	}

	/*
	 * GSC is only killed by an FLR, so we need to trigger one on unload to
	 * make sure we stop it. This is because we assign a chunk of memory to
	 * the GSC as part of the FW load, so we need to make sure it stops
	 * using it when we release it to the system on driver unload. Note that
	 * this is not a problem of the unload per se, because the GSC will not
	 * touch that memory unless there are requests for it coming from the
	 * driver; therefore, no accesses will happen while Xe is not loaded,
	 * but if we re-load the driver then the GSC might wake up and try to
	 * access that old memory location again.
	 * Given that an FLR is a very disruptive action (see the FLR function
	 * for details), we want to do it as the last action before releasing
	 * the access to the MMIO bar, which means we need to do it as part of
	 * mmio cleanup.
	 */
	xe->needs_flr_on_fini = true;

	err = emit_gsc_upload(gsc);
	if (err) {
		xe_gt_err(gt, "Failed to emit GSC FW upload (%pe)\n", ERR_PTR(err));
		return err;
	}

	err = gsc_fw_wait(gt);
	if (err) {
		xe_gt_err(gt, "Failed to wait for GSC load (%pe)\n", ERR_PTR(err));
		return err;
	}

	err = query_compatibility_version(gsc);
	if (err)
		return err;

	err = xe_uc_fw_check_version_requirements(&gsc->fw);
	if (err)
		return err;

	return 0;
}

static int gsc_upload_and_init(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	unsigned int fw_ref;
	int ret;

	if (XE_WA(tile->primary_gt, 14018094691)) {
		fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);

		/*
		 * If the forcewake fails we want to keep going, because the worst
		 * case outcome in failing to apply the WA is that PXP won't work,
		 * which is not fatal. Forcewake get warns implicitly in case of
		 * failure.
		 */
		xe_gt_mcr_multicast_write(tile->primary_gt,
					  EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
					  EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
	}

	ret = gsc_upload(gsc);

	if (XE_WA(tile->primary_gt, 14018094691))
		xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);

	if (ret)
		return ret;

	xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);

	/* GSC load is done, restore expected GT frequencies */
	xe_gt_sanitize_freq(gt);

	xe_gt_dbg(gt, "GSC FW async load completed\n");

	/* HuC auth failure is not fatal */
	if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
		xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);

	ret = xe_gsc_proxy_start(gsc);
	if (ret)
		return ret;

	xe_gt_dbg(gt, "GSC proxy init completed\n");

	return 0;
}

static int gsc_er_complete(struct xe_gt *gt)
{
	u32 er_status;

	if (!gsc_fw_is_loaded(gt))
		return 0;

	/*
	 * Starting on Xe2, the GSCCS engine reset is a 2-step process. When the
	 * driver or the GuC hit the GDRST register, the CS is immediately reset
	 * and a success is reported, but the GSC shim keeps resetting in the
	 * background. While the shim reset is ongoing, the CS is able to accept
	 * new context submission, but any commands that require the shim will
	 * be stalled until the reset is completed. This means that we can keep
	 * submitting to the GSCCS as long as we make sure that the preemption
	 * timeout is big enough to cover any delay introduced by the reset.
	 * When the shim reset completes, a specific CS interrupt is triggered,
	 * in response to which we need to check the GSCI_TIMER_STATUS register
	 * to see if the reset was successful or not.
	 * Note that the GSCI_TIMER_STATUS register is not power save/restored,
	 * so it gets reset on MC6 entry. However, a reset failure stops MC6,
	 * so in that scenario we're always guaranteed to find the correct
	 * value.
	 */
	er_status = xe_mmio_read32(&gt->mmio, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;

	if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
		/*
		 * XXX: we should trigger an FLR here, but we don't have support
		 * for that yet. Since we can't recover from the error, we
		 * declare the device as wedged.
		 */
		xe_gt_err(gt, "GSC ER timed out!\n");
		xe_device_declare_wedged(gt_to_xe(gt));
		return -EIO;
	}

	return 0;
}

static void gsc_work(struct work_struct *work)
{
	struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	u32 actions;
	int ret;

	spin_lock_irq(&gsc->lock);
	actions = gsc->work_actions;
	gsc->work_actions = 0;
	spin_unlock_irq(&gsc->lock);

	xe_pm_runtime_get(xe);
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);

	if (actions & GSC_ACTION_ER_COMPLETE) {
		ret = gsc_er_complete(gt);
		if (ret)
			goto out;
	}

	if (actions & GSC_ACTION_FW_LOAD) {
		ret = gsc_upload_and_init(gsc);
		if (ret && ret != -EEXIST)
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
		else
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
	}

	if (actions & GSC_ACTION_SW_PROXY)
		xe_gsc_proxy_request_handler(gsc);

out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(xe);
}

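/**
 * xe_gsc_hwe_irq_handler - handle an interrupt from the GSCCS
 * @hwe: the GSCCS hw engine that raised the interrupt
 * @intr_vec: the interrupt vector
 *
 * If the vector reports an engine reset completion, queue the GSC worker
 * to check the reset outcome.
 */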
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	if (unlikely(!intr_vec))
		return;

	if (intr_vec & GSC_ER_COMPLETE) {
		spin_lock(&gsc->lock);
		gsc->work_actions |= GSC_ACTION_ER_COMPLETE;
		spin_unlock(&gsc->lock);

		queue_work(gsc->wq, &gsc->work);
	}
}

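/**
 * xe_gsc_init - GSC uC SW initialization
 * @gsc: the GSC uC
 *
 * Set up the SW side of the GSC uC (worker, lock, FW type), fetch the GSC
 * firmware and initialize the proxy channel. The GSC uC is only available
 * on the media GT, so on other GTs the FW is marked as not supported.
 *
 * Return: 0 on success or if the GSC is not supported or disabled, a
 * negative error code otherwise.
 */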
int xe_gsc_init(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	int ret;

	gsc->fw.type = XE_UC_FW_TYPE_GSC;
	INIT_WORK(&gsc->work, gsc_work);
	spin_lock_init(&gsc->lock);

	/* The GSC uC is only available on the media GT */
	if (tile->media_gt && (gt != tile->media_gt)) {
		xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	/*
	 * Some platforms can have GuC but not GSC. That would cause
	 * xe_uc_fw_init(gsc) to return a "not supported" failure code and abort
	 * all firmware loading. So check for GSC being enabled before
	 * propagating the failure back up. That way the higher level will keep
	 * going and load GuC as appropriate.
	 */
	ret = xe_uc_fw_init(&gsc->fw);
	if (!xe_uc_fw_is_enabled(&gsc->fw))
		return 0;
	else if (ret)
		goto out;

	ret = xe_gsc_proxy_init(gsc);
	if (ret && ret != -ENODEV)
		goto out;

	return 0;

out:
	xe_gt_err(gt, "GSC init failed with %d\n", ret);
	return ret;
}

static void free_resources(void *arg)
{
	struct xe_gsc *gsc = arg;

	if (gsc->wq) {
		destroy_workqueue(gsc->wq);
		gsc->wq = NULL;
	}

	if (gsc->q) {
		xe_exec_queue_put(gsc->q);
		gsc->q = NULL;
	}
}

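/**
 * xe_gsc_init_post_hwconfig - allocate the resources needed for GSC load
 * @gsc: the GSC uC
 *
 * Allocate the BO the FW is copied into ahead of the load, the exec queue
 * used to submit to the GSCCS and the ordered workqueue the load and proxy
 * work items run on. The queue and workqueue are released via a devm
 * action.
 *
 * Return: 0 on success or if there is no GSC FW available, a negative
 * error code otherwise.
 */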
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);
	struct xe_exec_queue *q;
	struct workqueue_struct *wq;
	struct xe_bo *bo;
	int err;

	if (!xe_uc_fw_is_available(&gsc->fw))
		return 0;

	if (!hwe)
		return -ENODEV;

	bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
					  XE_BO_FLAG_STOLEN |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	q = xe_exec_queue_create(xe, NULL,
				 BIT(hwe->logical_instance), 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL |
				 EXEC_QUEUE_FLAG_PERMANENT, 0);
	if (IS_ERR(q)) {
		xe_gt_err(gt, "Failed to create queue for GSC submission\n");
		err = PTR_ERR(q);
		goto out_bo;
	}

	wq = alloc_ordered_workqueue("gsc-ordered-wq", 0);
	if (!wq) {
		err = -ENOMEM;
		goto out_q;
	}

	gsc->private = bo;
	gsc->q = q;
	gsc->wq = wq;

	err = devm_add_action_or_reset(xe->drm.dev, free_resources, gsc);
	if (err)
		return err;

	xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out_q:
	xe_exec_queue_put(q);
out_bo:
	xe_bo_unpin_map_no_vm(bo);
	return err;
}

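/**
 * xe_gsc_load_start - kick off the GSC FW load
 * @gsc: the GSC uC to load
 *
 * The GSC FW load can take hundreds of ms (see gsc_fw_wait()), so it is
 * performed asynchronously from the GSC worker. If the FW is already
 * loaded, e.g. after a GT reset which the GSC FW survives, only the SW
 * load status is updated.
 */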
void xe_gsc_load_start(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
		return;

	/*
	 * The GSC HW is only reset by driver FLR or D3cold entry. We don't
	 * support the former at runtime, while the latter is only supported on
	 * DGFX, for which we don't support GSC. Therefore, if GSC failed to
	 * load previously there is no need to try again because the HW is
	 * stuck in the error state.
	 */
	xe_assert(xe, !IS_DGFX(xe));
	if (xe_uc_fw_is_in_error_state(&gsc->fw))
		return;

	/* GSC FW survives GT reset and D3Hot */
	if (gsc_fw_is_loaded(gt)) {
		if (xe_gsc_proxy_init_done(gsc))
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
		else
			xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
		return;
	}

	spin_lock_irq(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_FW_LOAD;
	spin_unlock_irq(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

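/**
 * xe_gsc_wait_for_worker_completion - wait for the GSC worker to go idle
 * @gsc: the GSC uC
 */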
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
{
	if (xe_uc_fw_is_loadable(&gsc->fw) && gsc->wq)
		flush_work(&gsc->work);
}

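/**
 * xe_gsc_stop_prepare - wait for the GSC to be in a state safe for stopping
 * @gsc: the GSC uC
 *
 * An interrupted GSC FW load or proxy init can only be recovered with an
 * FLR, so wait for the proxy init (the last step of the load flow) to
 * complete before the uC is stopped. The caller must hold the GSC
 * forcewake.
 */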
void xe_gsc_stop_prepare(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int ret;

	if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
		return;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);

	/*
	 * If the GSC FW load or the proxy init are interrupted, the only way
	 * to recover it is to do an FLR and reload the GSC from scratch.
	 * Therefore, let's wait for the init to complete before stopping
	 * operations. The proxy init is the last step, so we can just wait on
	 * that.
	 */
	ret = xe_gsc_wait_for_proxy_init_done(gsc);
	if (ret)
		xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
}

/*
 * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
 * GSC engine reset by writing a notification bit in the GS1 register and then
 * triggering an interrupt to GSC; from the interrupt it will take up to 200ms
 * for the FW to get ready for the reset, so we need to wait for that amount
 * of time.
 * After the reset is complete we need to then clear the GS1 register.
 */
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
{
	u32 gs1_set = prep ? HECI_H_GS1_ER_PREP : 0;
	u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;

	/* WA only applies if the GSC is loaded */
	if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
		return;

	xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);

	if (prep) {
		/* make sure the reset bit is clear when writing the CSR reg */
		xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE),
			      HECI_H_CSR_RST, HECI_H_CSR_IG);
		msleep(200);
	}
}

/**
 * xe_gsc_print_info - print info about GSC FW status
 * @gsc: the GSC structure
 * @p: the printer to be used to print the info
 */
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_mmio *mmio = &gt->mmio;
	unsigned int fw_ref;

	xe_uc_fw_print(&gsc->fw, p);

	drm_printf(p, "\tfound security version %u\n", gsc->security_version);

	if (!xe_uc_fw_is_enabled(&gsc->fw))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref)
		return;

	drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
		   xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
