// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_engine.h"

#include <linux/nospec.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_topology.h"
#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_mmio.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_rtp.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

#define MAX_MMIO_BASES 3
struct engine_info {
	const char *name;
	unsigned int class : 8;
	unsigned int instance : 8;
	unsigned int irq_offset : 8;
	enum xe_force_wake_domains domain;
	u32 mmio_base;
};

static const struct engine_info engine_infos[] = {
	[XE_HW_ENGINE_RCS0] = {
		.name = "rcs0",
		.class = XE_ENGINE_CLASS_RENDER,
		.instance = 0,
		.irq_offset = ilog2(INTR_RCS0),
		.domain = XE_FW_RENDER,
		.mmio_base = RENDER_RING_BASE,
	},
	[XE_HW_ENGINE_BCS0] = {
		.name = "bcs0",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 0,
		.irq_offset = ilog2(INTR_BCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = BLT_RING_BASE,
	},
	[XE_HW_ENGINE_BCS1] = {
		.name = "bcs1",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 1,
		.irq_offset = ilog2(INTR_BCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS1_RING_BASE,
	},
	[XE_HW_ENGINE_BCS2] = {
		.name = "bcs2",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 2,
		.irq_offset = ilog2(INTR_BCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS2_RING_BASE,
	},
	[XE_HW_ENGINE_BCS3] = {
		.name = "bcs3",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 3,
		.irq_offset = ilog2(INTR_BCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS3_RING_BASE,
	},
	[XE_HW_ENGINE_BCS4] = {
		.name = "bcs4",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 4,
		.irq_offset = ilog2(INTR_BCS(4)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS4_RING_BASE,
	},
	[XE_HW_ENGINE_BCS5] = {
		.name = "bcs5",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 5,
		.irq_offset = ilog2(INTR_BCS(5)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS5_RING_BASE,
	},
	[XE_HW_ENGINE_BCS6] = {
		.name = "bcs6",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 6,
		.irq_offset = ilog2(INTR_BCS(6)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS6_RING_BASE,
	},
	[XE_HW_ENGINE_BCS7] = {
		.name = "bcs7",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 7,
		.irq_offset = ilog2(INTR_BCS(7)),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS7_RING_BASE,
	},
	[XE_HW_ENGINE_BCS8] = {
		.name = "bcs8",
		.class = XE_ENGINE_CLASS_COPY,
		.instance = 8,
		.irq_offset = ilog2(INTR_BCS8),
		.domain = XE_FW_RENDER,
		.mmio_base = XEHPC_BCS8_RING_BASE,
	},

	[XE_HW_ENGINE_VCS0] = {
		.name = "vcs0",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VCS(0)),
		.domain = XE_FW_MEDIA_VDBOX0,
		.mmio_base = BSD_RING_BASE,
	},
	[XE_HW_ENGINE_VCS1] = {
		.name = "vcs1",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VCS(1)),
		.domain = XE_FW_MEDIA_VDBOX1,
		.mmio_base = BSD2_RING_BASE,
	},
	[XE_HW_ENGINE_VCS2] = {
		.name = "vcs2",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VCS(2)),
		.domain = XE_FW_MEDIA_VDBOX2,
		.mmio_base = BSD3_RING_BASE,
	},
	[XE_HW_ENGINE_VCS3] = {
		.name = "vcs3",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VCS(3)),
		.domain = XE_FW_MEDIA_VDBOX3,
		.mmio_base = BSD4_RING_BASE,
	},
	[XE_HW_ENGINE_VCS4] = {
		.name = "vcs4",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 4,
		.irq_offset = 32 + ilog2(INTR_VCS(4)),
		.domain = XE_FW_MEDIA_VDBOX4,
		.mmio_base = XEHP_BSD5_RING_BASE,
	},
	[XE_HW_ENGINE_VCS5] = {
		.name = "vcs5",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 5,
		.irq_offset = 32 + ilog2(INTR_VCS(5)),
		.domain = XE_FW_MEDIA_VDBOX5,
		.mmio_base = XEHP_BSD6_RING_BASE,
	},
	[XE_HW_ENGINE_VCS6] = {
		.name = "vcs6",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 6,
		.irq_offset = 32 + ilog2(INTR_VCS(6)),
		.domain = XE_FW_MEDIA_VDBOX6,
		.mmio_base = XEHP_BSD7_RING_BASE,
	},
	[XE_HW_ENGINE_VCS7] = {
		.name = "vcs7",
		.class = XE_ENGINE_CLASS_VIDEO_DECODE,
		.instance = 7,
		.irq_offset = 32 + ilog2(INTR_VCS(7)),
		.domain = XE_FW_MEDIA_VDBOX7,
		.mmio_base = XEHP_BSD8_RING_BASE,
	},
	[XE_HW_ENGINE_VECS0] = {
		.name = "vecs0",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 0,
		.irq_offset = 32 + ilog2(INTR_VECS(0)),
		.domain = XE_FW_MEDIA_VEBOX0,
		.mmio_base = VEBOX_RING_BASE,
	},
	[XE_HW_ENGINE_VECS1] = {
		.name = "vecs1",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 1,
		.irq_offset = 32 + ilog2(INTR_VECS(1)),
		.domain = XE_FW_MEDIA_VEBOX1,
		.mmio_base = VEBOX2_RING_BASE,
	},
	[XE_HW_ENGINE_VECS2] = {
		.name = "vecs2",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 2,
		.irq_offset = 32 + ilog2(INTR_VECS(2)),
		.domain = XE_FW_MEDIA_VEBOX2,
		.mmio_base = XEHP_VEBOX3_RING_BASE,
	},
	[XE_HW_ENGINE_VECS3] = {
		.name = "vecs3",
		.class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
		.instance = 3,
		.irq_offset = 32 + ilog2(INTR_VECS(3)),
		.domain = XE_FW_MEDIA_VEBOX3,
		.mmio_base = XEHP_VEBOX4_RING_BASE,
	},
	[XE_HW_ENGINE_CCS0] = {
		.name = "ccs0",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 0,
		.irq_offset = ilog2(INTR_CCS(0)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE0_RING_BASE,
	},
	[XE_HW_ENGINE_CCS1] = {
		.name = "ccs1",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 1,
		.irq_offset = ilog2(INTR_CCS(1)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE1_RING_BASE,
	},
	[XE_HW_ENGINE_CCS2] = {
		.name = "ccs2",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 2,
		.irq_offset = ilog2(INTR_CCS(2)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE2_RING_BASE,
	},
	[XE_HW_ENGINE_CCS3] = {
		.name = "ccs3",
		.class = XE_ENGINE_CLASS_COMPUTE,
		.instance = 3,
		.irq_offset = ilog2(INTR_CCS(3)),
		.domain = XE_FW_RENDER,
		.mmio_base = COMPUTE3_RING_BASE,
	},
	[XE_HW_ENGINE_GSCCS0] = {
		.name = "gsccs0",
		.class = XE_ENGINE_CLASS_OTHER,
		.instance = OTHER_GSC_INSTANCE,
		.domain = XE_FW_GSC,
		.mmio_base = GSCCS_RING_BASE,
	},
};

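/*
 * Driver-managed cleanup action for a hw engine: destroy the execlist port,
 * if one was created, and detach the engine from its GT.
 */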
static void hw_engine_fini(void *arg)
{
	struct xe_hw_engine *hwe = arg;

	if (hwe->exl_port)
		xe_execlist_port_destroy(hwe->exl_port);

	hwe->gt = NULL;
}

/**
 * xe_hw_engine_mmio_write32() - Write engine register
 * @hwe: engine
 * @reg: register to write into
 * @val: desired 32-bit value to write
 *
 * This function will write @val into an engine specific register.
 * Forcewake must be held by the caller.
 */
void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
			       struct xe_reg reg, u32 val)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	xe_mmio_write32(&hwe->gt->mmio, reg, val);
}

/**
 * xe_hw_engine_mmio_read32() - Read engine register
 * @hwe: engine
 * @reg: register to read from
 *
 * This function will read from an engine specific register.
 * Forcewake must be held by the caller.
 *
 * Return: value of the 32-bit register.
 */
u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{
	xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);

	reg.addr += hwe->mmio_base;

	return xe_mmio_read32(&hwe->gt->mmio, reg);
}

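/**
 * xe_hw_engine_enable_ring() - Enable a hw engine's ring for execution
 * @hwe: engine
 *
 * Enables the CCS units on compute engines, unmasks all hardware status
 * interrupts, points the engine at its hardware status page, takes the ring
 * out of legacy mode (enabling MSI-X interrupt generation where supported)
 * and clears the STOP_RING bit. Forcewake must be held by the caller.
 */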
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
	u32 ccs_mask =
		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
	u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);

	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
		xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));

	xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
	xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
				  xe_bo_ggtt_addr(hwe->hwsp));

	if (xe_device_has_msix(gt_to_xe(hwe->gt)))
		ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
	xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
	xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
				  _MASKED_BIT_DISABLE(STOP_RING));
	xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}

static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_device *xe,
						 const struct xe_gt *gt,
						 const struct xe_hw_engine *hwe)
{
	/*
	 * Xe3p no longer supports load balance mode, so "fixed cslice" mode
	 * is automatic and no RCU_MODE programming is required.
	 */
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 35)
		return false;

	return xe_gt_ccs_mode_enabled(gt) &&
	       xe_rtp_match_first_render_or_compute(xe, gt, hwe);
}

static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_device *xe,
				      const struct xe_gt *gt,
				      const struct xe_hw_engine *hwe)
{
	if (GRAPHICS_VER(xe) < 20)
		return false;

	if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
	    hwe->class != XE_ENGINE_CLASS_RENDER)
		return false;

	return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}

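/**
 * xe_hw_engine_setup_default_lrc_state() - Build default LRC register state
 * @hwe: engine
 *
 * Processes the RTP entries that seed the default LRC register state for
 * this engine (default BLIT_CCTL MOCS on copy engines, WMTP disable where
 * the hardware has it fused off) and records the result in @hwe->reg_lrc.
 */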
void
xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = gt->mocs.uc_index;
	u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
			    REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr lrc_setup[] = {
		/*
		 * Some blitter commands do not have a field for MOCS; those
		 * commands will use the MOCS index pointed to by BLIT_CCTL.
		 * The BLIT_CCTL registers need to be programmed to un-cached.
		 */
		{ XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
			       ENGINE_CLASS(COPY)),
		  XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
					   BLIT_CCTL_DST_MOCS_MASK |
					   BLIT_CCTL_SRC_MOCS_MASK,
					   blit_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Disable WMTP if HW doesn't support it */
		{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
		  XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
		  XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(0),
					   PREEMPT_GPGPU_LEVEL_MASK,
					   PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
		},
	};

	xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}

static void
hw_engine_setup_default_state(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);
	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be
	 * used by the command streamer when executing commands that
	 * don't have a way to explicitly specify a MOCS setting.
	 * The default should usually reference whichever MOCS entry
	 * corresponds to uncached behavior, although use of a WB cached
	 * entry is recommended by the spec in certain circumstances on
	 * specific platforms.
	 * Bspec: 72161
	 */
	const u8 mocs_write_idx = gt->mocs.uc_index;
	const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
				 (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
				 gt->mocs.wb_index : gt->mocs.uc_index;
	u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
				REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
	const struct xe_rtp_entry_sr engine_entries[] = {
		{ XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
					   CMD_CCTL_WRITE_OVERRIDE_MASK |
					   CMD_CCTL_READ_OVERRIDE_MASK,
					   ring_cmd_cctl_val,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/*
		 * To allow the GSC engine to go idle on MTL we need to enable
		 * idle messaging and set the hysteresis value (we use 0xA=5us
		 * as recommended in spec). On platforms after MTL this is
		 * enabled by default.
		 */
		{ XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
		  XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
		  XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
				     IDLE_MSG_DISABLE,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)),
				 FIELD_SET(RING_PWRCTX_MAXCNT(0),
					   IDLE_WAIT_TIME,
					   0xA,
					   XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Enable Priority Mem Read */
		{ XE_RTP_NAME("Priority_Mem_Read"),
		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
		  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
				     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
		},
		/* Use Fixed slice CCS mode */
		{ XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
		  XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
		  XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
					   RCU_MODE_FIXED_SLICE_CCS_MODE))
		},
	};

	xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
}

static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
{
	const struct engine_info *info;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		info = &engine_infos[id];
		if (info->class == class && info->instance == instance)
			return info;
	}

	return NULL;
}

static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
{
	/* For MSI-X, hw engines report to the offset of engine instance zero */
	const struct engine_info *info = find_engine_info(class, 0);

	xe_gt_assert(gt, info);

	return info ? info->irq_offset : 0;
}

static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
				 enum xe_hw_engine_id id)
{
	const struct engine_info *info;

	if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
		return;

	if (!(gt->info.engine_mask & BIT(id)))
		return;

	info = &engine_infos[id];

	xe_gt_assert(gt, !hwe->gt);

	hwe->gt = gt;
	hwe->class = info->class;
	hwe->instance = info->instance;
	hwe->mmio_base = info->mmio_base;
	hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
		get_msix_irq_offset(gt, info->class) :
		info->irq_offset;
	hwe->domain = info->domain;
	hwe->name = info->name;
	hwe->fence_irq = &gt->fence_irq[info->class];
	hwe->engine_id = id;

	hwe->eclass = &gt->eclass[hwe->class];
	if (!hwe->eclass->sched_props.job_timeout_ms) {
		hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
		hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
		hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
		hwe->eclass->sched_props.timeslice_us = 1 * 1000;
		hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
		hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
		hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
		hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
		hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;

		/*
		 * The GSC engine can accept submissions while the GSC shim is
		 * being reset, during which time the submission is stalled. In
		 * the worst case, the shim reset can take up to the maximum GSC
		 * command execution time (250ms), so the request start can be
		 * delayed by that much; the request itself can take that long
		 * without being preemptible, which means worst case it can
		 * theoretically take up to 500ms for a preemption to go through
		 * on the GSC engine. Adding to that an extra 100ms as a safety
		 * margin, we get a minimum recommended timeout of 600ms.
		 * The preempt_timeout value can't be tuned for OTHER_CLASS
		 * because the class is reserved for kernel usage, so we just
		 * need to make sure that the starting value is above that
		 * threshold; since our default value (640ms) is greater than
		 * 600ms, the only way we can go below is via a kconfig setting.
		 * If that happens, log it in dmesg and update the value.
		 */
		if (hwe->class == XE_ENGINE_CLASS_OTHER) {
			const u32 min_preempt_timeout = 600 * 1000;

			if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
				hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
				xe_gt_notice(gt, "Increasing preempt_timeout for GSC to 600ms\n");
			}
		}

		/* Record default props */
		hwe->eclass->defaults = hwe->eclass->sched_props;
	}

	xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
	xe_tuning_process_engine(hwe);
	xe_wa_process_engine(hwe);
	hw_engine_setup_default_state(hwe);

	xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
	xe_reg_whitelist_process_engine(hwe);
}

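/*
 * WA 16023105232: if the idle delay (IDLEDLY) is equal to or greater than
 * the idle wait time (MAXCNT), or context switching is inhibited until
 * preemption, the engine may never switch out. Detect that case and
 * re-program IDLEDLY to just below MAXCNT. VFs cannot access these
 * registers, so the fixup is skipped under SR-IOV.
 */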
static void adjust_idledly(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	u32 idledly, maxcnt;
	u32 idledly_units_ps = 8 * gt->info.timestamp_base;
	u32 maxcnt_units_ns = 640;
	bool inhibit_switch = false;

	if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
		idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
		maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));

		inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED;
		idledly = REG_FIELD_GET(IDLE_DELAY, idledly);
		idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000);
		maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt);
		maxcnt *= maxcnt_units_ns;

		if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) {
			idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * 1000),
						    idledly_units_ps);
			xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
		}
	}
}

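/*
 * Second-stage engine init: apply the save/restore register list, allocate
 * the hardware status page and either create an execlist port (when running
 * without GuC submission) or enable the ring directly. Cleanup is
 * registered as a driver-managed action.
 */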
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
			  enum xe_hw_engine_id id)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	int err;

	xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
	xe_gt_assert(gt, gt->info.engine_mask & BIT(id));

	xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
						 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
						 XE_BO_FLAG_GGTT |
						 XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(hwe->hwsp)) {
		err = PTR_ERR(hwe->hwsp);
		goto err_name;
	}

	if (!xe_device_uc_enabled(xe)) {
		hwe->exl_port = xe_execlist_port_create(xe, hwe);
		if (IS_ERR(hwe->exl_port)) {
			err = PTR_ERR(hwe->exl_port);
			goto err_hwsp;
		}
	} else {
		/* GSCCS has a special interrupt for reset */
		if (hwe->class == XE_ENGINE_CLASS_OTHER)
			hwe->irq_handler = xe_gsc_hwe_irq_handler;

		if (!IS_SRIOV_VF(xe))
			xe_hw_engine_enable_ring(hwe);
	}

	/* We reserve the highest BCS instance for USM */
	if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
		gt->usm.reserved_bcs_instance = hwe->instance;

	/* Ensure IDLEDLY is lower than MAXCNT */
	adjust_idledly(hwe);

	return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);

err_hwsp:
	xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
	hwe->name = NULL;

	return err;
}

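/* Assign per-class logical instance numbers to the enabled engines. */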
static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
{
	int class;

	/* FIXME: Doing a simple logical mapping that works for most hardware */
	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;
		int logical_instance = 0;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class)
				hwe->logical_instance = logical_instance++;
	}
}

static void read_media_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;
	int i, j;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);

	/*
	 * Pre-Xe_HP platforms had register bits representing absent engines,
	 * whereas Xe_HP and beyond have bits representing present engines.
	 * Invert the polarity on old platforms so that we can use common
	 * handling below.
	 */
	if (GRAPHICS_VERx100(xe) < 1250)
		media_fuse = ~media_fuse;

	vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
	vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "vcs%u fused off\n", j);
		}
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(BIT(j) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "vecs%u fused off\n", j);
		}
	}
}

static u32 infer_svccopy_from_meml3(struct xe_gt *gt)
{
	u32 meml3 = REG_FIELD_GET(MEML3_EN_MASK,
				  xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
	u32 svccopy_mask = 0;

	/*
	 * Each of the four meml3 bits determines the fusing of two service
	 * copy engines.
	 */
	for (int i = 0; i < 4; i++)
		svccopy_mask |= (meml3 & BIT(i)) ? 0b11 << 2 * i : 0;

	return svccopy_mask;
}

static u32 read_svccopy_fuses(struct xe_gt *gt)
{
	return REG_FIELD_GET(FUSE_SERVICE_COPY_ENABLE_MASK,
			     xe_mmio_read32(&gt->mmio, SERVICE_COPY_ENABLE));
}

static void read_copy_fuses(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 bcs_mask;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	if (GRAPHICS_VER(xe) >= 35)
		bcs_mask = read_svccopy_fuses(gt);
	else if (GRAPHICS_VERx100(xe) == 1260)
		bcs_mask = infer_svccopy_from_meml3(gt);
	else
		return;

	/* Only BCS1-BCS8 may be fused off */
	bcs_mask <<= XE_HW_ENGINE_BCS1;
	for (int i = XE_HW_ENGINE_BCS1; i <= XE_HW_ENGINE_BCS8; ++i) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!(bcs_mask & BIT(i))) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "bcs%u fused off\n",
				   i - XE_HW_ENGINE_BCS0);
		}
	}
}

static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
	/*
	 * CCS fusing based on DSS masks only applies to platforms that can
	 * have more than one CCS.
	 */
	if (hweight64(gt->info.engine_mask &
		      GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
		return;

	/*
	 * CCS availability on Xe_HP is inferred from the presence of DSS in
	 * each quadrant.
	 */
	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
	u32 ccs_mask;

	ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
	ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);

	for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		if ((ccs_mask & BIT(j)) == 0) {
			gt->info.engine_mask &= ~BIT(i);
			xe_gt_info(gt, "ccs%u fused off\n", j);
		}
	}
}

static void read_compute_fuses(struct xe_gt *gt)
{
	if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
		read_compute_fuses_from_reg(gt);
	else
		read_compute_fuses_from_dss(gt);
}

static void check_gsc_availability(struct xe_gt *gt)
{
	if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
		return;

	/*
	 * The GSCCS is only used to communicate with the GSC FW, so if we don't
	 * have the FW there is nothing we need the engine for and can therefore
	 * skip its initialization.
	 */
	if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
		gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);

		/* interrupts were previously enabled, so turn them off */
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);

		xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
	}
}

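/*
 * Remove engines that have been disabled through configfs from the GT's
 * engine mask.
 */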
static void check_sw_disable(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (!(gt->info.engine_mask & BIT(id)))
			continue;

		if (!(sw_allowed & BIT(id))) {
			gt->info.engine_mask &= ~BIT(id);
			xe_gt_info(gt, "%s disabled via configfs\n",
				   engine_infos[id].name);
		}
	}
}

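/**
 * xe_hw_engines_init_early() - Early initialization of hw engines on a GT
 * @gt: GT structure
 *
 * Trims the GT's engine mask based on hardware fuses, GSC FW availability
 * and configfs overrides, then performs early, submission-independent setup
 * of each remaining engine.
 *
 * Return: 0 on success.
 */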
int xe_hw_engines_init_early(struct xe_gt *gt)
{
	int i;

	read_media_fuses(gt);
	read_copy_fuses(gt);
	read_compute_fuses(gt);
	check_gsc_availability(gt);
	check_sw_disable(gt);

	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
	BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);

	for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
		hw_engine_init_early(gt, &gt->hw_engines[i], i);

	return 0;
}

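/**
 * xe_hw_engines_init() - Initialize the enabled hw engines on a GT
 * @gt: GT structure
 *
 * Completes initialization of every engine left in the GT's engine mask,
 * assigns logical instance numbers and builds the hw engine groups.
 *
 * Return: 0 on success, negative error code on error.
 */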
int xe_hw_engines_init(struct xe_gt *gt)
{
	int err;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		err = hw_engine_init(gt, hwe, id);
		if (err)
			return err;
	}

	hw_engine_setup_logical_mapping(gt);
	err = xe_hw_engine_setup_groups(gt);
	if (err)
		return err;

	return 0;
}

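/**
 * xe_hw_engine_handle_irq() - Handle an interrupt reported for a hw engine
 * @hwe: engine
 * @intr_vec: interrupt vector bits for this engine
 *
 * Wakes user-fence waiters, forwards the vector to the engine-specific IRQ
 * handler, if any, and runs the hw fence IRQ on user interrupts.
 */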
void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
{
	wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);

	if (hwe->irq_handler)
		hwe->irq_handler(hwe, intr_vec);

	if (intr_vec & GT_MI_USER_INTERRUPT)
		xe_hw_fence_irq_run(hwe->fence_irq);
}

/**
 * xe_hw_engine_snapshot_capture() - Take a quick snapshot of the HW Engine.
 * @hwe: Xe HW Engine.
 * @q: The exec queue object.
 *
 * This snapshot can be printed out at a later stage, e.g. during
 * dev_coredump analysis.
 *
 * Return: a Xe HW Engine snapshot object that must be freed by the caller
 * via xe_hw_engine_snapshot_free().
 */
struct xe_hw_engine_snapshot *
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
	struct xe_hw_engine_snapshot *snapshot;
	struct __guc_capture_parsed_output *node;

	if (!xe_hw_engine_is_valid(hwe))
		return NULL;

	snapshot = kzalloc_obj(*snapshot, GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
	snapshot->hwe = hwe;
	snapshot->logical_instance = hwe->logical_instance;
	snapshot->forcewake.domain = hwe->domain;
	snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
						    hwe->domain);
	snapshot->mmio_base = hwe->mmio_base;
	snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);

	/* no more VF accessible data below this point */
	if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
		return snapshot;

	if (q) {
		/* If we got a GuC capture, set the source to GuC */
		node = xe_guc_capture_get_matching_and_lock(q);
		if (node) {
			struct xe_device *xe = gt_to_xe(hwe->gt);
			struct xe_devcoredump *coredump = &xe->devcoredump;

			coredump->snapshot.matched_node = node;
			xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
			return snapshot;
		}
	}

	/* otherwise, do manual capture */
	xe_engine_manual_capture(hwe, snapshot);
	xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");

	return snapshot;
}

/**
 * xe_hw_engine_snapshot_free() - Free all allocated objects for a given snapshot.
 * @snapshot: Xe HW Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
	struct xe_gt *gt;

	if (!snapshot)
		return;

	gt = snapshot->hwe->gt;
	/*
	 * xe_guc_capture_put_matched_nodes is called here and from
	 * xe_devcoredump_snapshot_free, to cover the 2 calling paths
	 * of hw_engines - debugfs and devcoredump free.
	 */
	xe_guc_capture_put_matched_nodes(&gt->uc.guc);

	kfree(snapshot->name);
	kfree(snapshot);
}

/**
 * xe_hw_engine_print() - Xe HW Engine Print.
 * @hwe: Hardware Engine.
 * @p: drm_printer.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
	xe_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}

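/**
 * xe_hw_engine_mask_per_class() - Build a mask of engine instances
 * @gt: GT structure
 * @engine_class: engine class
 *
 * Return: mask with one bit set per enabled instance of @engine_class
 * on @gt.
 */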
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
				enum xe_engine_class engine_class)
{
	u32 mask = 0;
	enum xe_hw_engine_id id;

	for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
		if (engine_infos[id].class == engine_class &&
		    gt->info.engine_mask & BIT(id))
			mask |= BIT(engine_infos[id].instance);
	}
	return mask;
}

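/**
 * xe_hw_engine_is_reserved() - Is the engine reserved for kernel use?
 * @hwe: engine
 *
 * GSCCS engines, compute engines hidden by the current ccs_mode setting and
 * the copy engine reserved for USM are not available to userspace.
 *
 * Return: true if the engine is reserved, false otherwise.
 */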
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
{
	struct xe_gt *gt = hwe->gt;
	struct xe_device *xe = gt_to_xe(gt);

	if (hwe->class == XE_ENGINE_CLASS_OTHER)
		return true;

	/* Check for engines disabled by ccs_mode setting */
	if (xe_gt_ccs_mode_enabled(gt) &&
	    hwe->class == XE_ENGINE_CLASS_COMPUTE &&
	    hwe->logical_instance >= gt->ccs_mode)
		return true;

	return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
		hwe->instance == gt->usm.reserved_bcs_instance;
}

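/**
 * xe_hw_engine_class_to_str() - Engine class to user-facing name prefix
 * @class: engine class
 *
 * Return: name prefix used for engines of @class ("rcs", "bcs", ...), or
 * NULL for an invalid class.
 */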
const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_RENDER:
		return "rcs";
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		return "vcs";
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return "vecs";
	case XE_ENGINE_CLASS_COPY:
		return "bcs";
	case XE_ENGINE_CLASS_OTHER:
		return "other";
	case XE_ENGINE_CLASS_COMPUTE:
		return "ccs";
	case XE_ENGINE_CLASS_MAX:
		break;
	}

	return NULL;
}

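/**
 * xe_hw_engine_read_timestamp() - Read an engine's timestamp
 * @hwe: engine
 *
 * Return: the current value of the engine's RING_TIMESTAMP register, read
 * as a 64-bit value.
 */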
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
	return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}

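/**
 * xe_hw_engine_to_fw_domain() - Forcewake domain of a hw engine
 * @hwe: engine
 *
 * Return: the forcewake domain that must be held to access the engine's
 * registers.
 */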
enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
{
	return engine_infos[hwe->engine_id].domain;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

/**
 * xe_hw_engine_lookup() - Lookup hardware engine for class:instance
 * @xe: xe device
 * @eci: engine class and instance
 *
 * This function will find a hardware engine for the given engine class and
 * instance.
 *
 * Return: If found xe_hw_engine pointer, NULL otherwise.
 */
struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe,
		    struct drm_xe_engine_class_instance eci)
{
	struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id);
	unsigned int idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (!gt)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}