// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_uc.h"

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gsc.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_engine_activity.h"
#include "xe_guc_pc.h"
#include "xe_huc.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"

static struct xe_gt *
uc_to_gt(struct xe_uc *uc)
{
	return container_of(uc, struct xe_gt, uc);
}

static struct xe_device *
uc_to_xe(struct xe_uc *uc)
{
	return gt_to_xe(uc_to_gt(uc));
}

/* Should be called once at driver load only */
int xe_uc_init_noalloc(struct xe_uc *uc)
{
	int ret;

	ret = xe_guc_init_noalloc(&uc->guc);
	if (ret)
		goto err;

	/* HuC and GSC have no early dependencies and will be initialized during xe_uc_init(). */
	return 0;

err:
	xe_gt_err(uc_to_gt(uc), "Failed to early initialize uC (%pe)\n", ERR_PTR(ret));
	return ret;
}

int xe_uc_init(struct xe_uc *uc)
{
	int ret;

	/*
	 * We call the GuC/HuC/GSC init functions even if GuC submission is off
	 * to correctly move our tracking of the FW state to "disabled".
	 */
	ret = xe_guc_init(&uc->guc);
	if (ret)
		goto err;

	ret = xe_huc_init(&uc->huc);
	if (ret)
		goto err;

	ret = xe_gsc_init(&uc->gsc);
	if (ret)
		goto err;

	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return 0;

	if (!IS_SRIOV_VF(uc_to_xe(uc))) {
		ret = xe_wopcm_init(&uc->wopcm);
		if (ret)
			goto err;
	}

	ret = xe_guc_min_load_for_hwconfig(&uc->guc);
	if (ret)
		goto err;

	return 0;
err:
	xe_gt_err(uc_to_gt(uc), "Failed to initialize uC (%pe)\n", ERR_PTR(ret));
	return ret;
}

/**
 * xe_uc_init_post_hwconfig - init UC post hwconfig load
 * @uc: The UC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_uc_init_post_hwconfig(struct xe_uc *uc)
{
	int err;

	/* GuC submission not enabled, nothing to do */
	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return 0;

	err = xe_uc_sanitize_reset(uc);
	if (err)
		return err;

	err = xe_guc_init_post_hwconfig(&uc->guc);
	if (err)
		return err;

	err = xe_huc_init_post_hwconfig(&uc->huc);
	if (err)
		return err;

	return xe_gsc_init_post_hwconfig(&uc->gsc);
}

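/* Reset the GuC; failures are logged and propagated to the caller */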
static int uc_reset(struct xe_uc *uc)
{
	struct xe_device *xe = uc_to_xe(uc);
	int ret;

	ret = xe_guc_reset(&uc->guc);
	if (ret) {
		drm_err(&xe->drm, "Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

static void xe_uc_sanitize(struct xe_uc *uc)
{
	xe_huc_sanitize(&uc->huc);
	xe_guc_sanitize(&uc->guc);
}

int xe_uc_sanitize_reset(struct xe_uc *uc)
{
	xe_uc_sanitize(uc);

	return uc_reset(uc);
}

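/*
 * VF load path: there is no firmware to upload from the VF side, so just
 * sanitize our state, re-enable GuC communication and connect to the PF
 * before marking submission as enabled again.
 */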
static int vf_uc_load_hw(struct xe_uc *uc)
{
	int err;

	err = xe_uc_sanitize_reset(uc);
	if (err)
		return err;

	err = xe_guc_enable_communication(&uc->guc);
	if (err)
		return err;

	err = xe_gt_sriov_vf_connect(uc_to_gt(uc));
	if (err)
		goto err_out;

	uc->guc.submission_state.enabled = true;

	err = xe_guc_opt_in_features_enable(&uc->guc);
	if (err)
		goto err_out;

	err = xe_gt_record_default_lrcs(uc_to_gt(uc));
	if (err)
		goto err_out;

	return 0;

err_out:
	xe_guc_sanitize(&uc->guc);
	return err;
}

/*
 * Should be called during driver load, after every GT reset, and after every
 * suspend to reload / auth the firmwares.
 */
int xe_uc_load_hw(struct xe_uc *uc)
{
	int ret;

	/* GuC submission not enabled, nothing to do */
	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return 0;

	if (IS_SRIOV_VF(uc_to_xe(uc)))
		return vf_uc_load_hw(uc);

	ret = xe_huc_upload(&uc->huc);
	if (ret)
		return ret;

	ret = xe_guc_upload(&uc->guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(&uc->guc);
	if (ret)
		return ret;

	ret = xe_gt_record_default_lrcs(uc_to_gt(uc));
	if (ret)
		goto err_out;

	ret = xe_guc_post_load_init(&uc->guc);
	if (ret)
		goto err_out;

	ret = xe_guc_pc_start(&uc->guc.pc);
	if (ret)
		goto err_out;

	xe_guc_engine_activity_enable_stats(&uc->guc);

	/* We don't fail the driver load if HuC fails to auth, but let's warn */
	ret = xe_huc_auth(&uc->huc, XE_HUC_AUTH_VIA_GUC);
	xe_gt_assert(uc_to_gt(uc), !ret);

	/* GSC load is async */
	xe_gsc_load_start(&uc->gsc);

	return 0;

err_out:
	xe_guc_sanitize(&uc->guc);
	return ret;
}

int xe_uc_reset_prepare(struct xe_uc *uc)
{
	/* GuC submission not enabled, nothing to do */
	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return 0;

	return xe_guc_reset_prepare(&uc->guc);
}

void xe_uc_gucrc_disable(struct xe_uc *uc)
{
	XE_WARN_ON(xe_guc_pc_gucrc_disable(&uc->guc.pc));
}

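/* Prepare the GSC and GuC for the upcoming xe_uc_stop() */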
void xe_uc_stop_prepare(struct xe_uc *uc)
{
	xe_gsc_stop_prepare(&uc->gsc);
	xe_guc_stop_prepare(&uc->guc);
}

void xe_uc_stop(struct xe_uc *uc)
{
	/* GuC submission not enabled, nothing to do */
	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return;

	xe_guc_stop(&uc->guc);
}

int xe_uc_start(struct xe_uc *uc)
{
	/* GuC submission not enabled, nothing to do */
	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return 0;

	return xe_guc_start(&uc->guc);
}

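/*
 * Wait for any GuC reset in progress to complete and retry the reset
 * preparation until it succeeds.
 */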
static void uc_reset_wait(struct xe_uc *uc)
{
	int ret;

again:
	xe_guc_reset_wait(&uc->guc);

	ret = xe_uc_reset_prepare(uc);
	if (ret)
		goto again;
}

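/* Flush the asynchronous GSC load worker and get the GuC ready to be stopped */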
void xe_uc_suspend_prepare(struct xe_uc *uc)
{
	xe_gsc_wait_for_worker_completion(&uc->gsc);
	xe_guc_stop_prepare(&uc->guc);
}

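/**
 * xe_uc_suspend() - Suspend the micro-controllers
 * @uc: The UC object
 *
 * Wait for any in-flight GuC reset to complete, stop submission and suspend
 * the GuC. Does nothing if GuC submission is not enabled.
 *
 * Return: 0 on success, negative error code on error.
 */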
int xe_uc_suspend(struct xe_uc *uc)
{
	/* GuC submission not enabled, nothing to do */
	if (!xe_device_uc_enabled(uc_to_xe(uc)))
		return 0;

	uc_reset_wait(uc);

	xe_uc_stop(uc);

	return xe_guc_suspend(&uc->guc);
}

/**
 * xe_uc_declare_wedged() - Declare UC wedged
 * @uc: the UC object
 *
 * Wedge the UC, which stops all submission, saves desired debug state, and
 * cleans up anything which could time out.
 */
void xe_uc_declare_wedged(struct xe_uc *uc)
{
	xe_gt_assert(uc_to_gt(uc), uc_to_xe(uc)->wedged.mode);

	xe_guc_declare_wedged(&uc->guc);
}