// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ads.h"

#include <linux/fault-inject.h>

#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_gt_mcr.h"

/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX	8

static struct xe_guc *
ads_to_guc(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_guc, ads);
}

static struct xe_gt *
ads_to_gt(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_gt, uc.guc.ads);
}

static struct xe_device *
ads_to_xe(struct xe_guc_ads *ads)
{
	return gt_to_xe(ads_to_gt(ads));
}

static struct iosys_map *
ads_to_map(struct xe_guc_ads *ads)
{
	return &ads->bo->vmap;
}

/* UM Queue parameters: */
#define GUC_UM_QUEUE_SIZE	(SZ_64K)
#define GUC_PAGE_RES_TIMEOUT_US	(-1)

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
 * all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+
 *      | guc_um_init_params                    |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |   ...                                 |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | w/a KLVs                              |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | UM queues                             |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	struct guc_um_init_params um_init_params;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[];
} __packed;

#define ads_blob_read(ads_, field_) \
	xe_map_rd_field(ads_to_xe(ads_), ads_to_map(ads_), 0, \
			struct __guc_ads_blob, field_)

#define ads_blob_write(ads_, field_, val_) \
	xe_map_wr_field(ads_to_xe(ads_), ads_to_map(ads_), 0, \
			struct __guc_ads_blob, field_, val_)

#define info_map_write(xe_, map_, field_, val_) \
	xe_map_wr_field(xe_, map_, 0, struct guc_gt_system_info, field_, val_)

#define info_map_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_gt_system_info, field_)

static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	xe_assert(xe, ads->regset_size);

	return ads->regset_size;
}

static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->golden_lrc_size);
}

static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->ads_waklv_size);
}

static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->capture_size);
}

static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	if (!xe->info.has_usm)
		return 0;

	return GUC_UM_QUEUE_SIZE * GUC_UM_HW_QUEUE_MAX;
}

static size_t guc_ads_private_data_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads_to_guc(ads)->fw.private_data_size);
}

static size_t guc_ads_regset_offset(struct xe_guc_ads *ads)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_regset_offset(ads) +
		 guc_ads_regset_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_golden_lrc_offset(ads) +
		 guc_ads_golden_lrc_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_waklv_offset(ads) +
		 guc_ads_waklv_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_um_queues_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_capture_offset(ads) +
		 guc_ads_capture_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_private_data_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_um_queues_offset(ads) +
		 guc_ads_um_queues_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_size(struct xe_guc_ads *ads)
{
	return guc_ads_private_data_offset(ads) +
	       guc_ads_private_data_size(ads);
}

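/*
 * Count every save/restore register entry of every hardware engine, plus
 * ADS_REGSET_EXTRA_MAX slack entries per engine (and the LNCFCMOCS block when
 * WA 1607983814 applies), and return the number of bytes needed for the
 * regset region.
 */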
static size_t calculate_regset_size(struct xe_gt *gt)
{
	struct xe_reg_sr_entry *sr_entry;
	unsigned long sr_idx;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	unsigned int count = 0;

	for_each_hw_engine(hwe, gt, id)
		xa_for_each(&hwe->reg_sr.xa, sr_idx, sr_entry)
			count++;

	count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;

	if (XE_WA(gt, 1607983814))
		count += LNCFCMOCS_REG_COUNT;

	return count * sizeof(struct guc_mmio_reg);
}

static u32 engine_enable_mask(struct xe_gt *gt, enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 mask = 0;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class)
			mask |= BIT(hwe->instance);

	return mask;
}

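/*
 * Worst-case space for the golden contexts: one page-aligned LRC image per
 * enabled engine class. The images themselves are copied in later by
 * guc_golden_lrc_populate().
 */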
static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	size_t total_size = 0, alloc_size, real_size;
	int class;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		if (!engine_enable_mask(gt, class))
			continue;

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;
	}

	return total_size;
}

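/*
 * Append a KLV carrying a single dword of payload to the w/a KLV region,
 * advancing *offset and shrinking *remain accordingly.
 */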
static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
				      enum xe_guc_klv_ids klv_id,
				      u32 value,
				      u32 *offset, u32 *remain)
{
	u32 size;
	u32 klv_entry[] = {
		/* 16:16 key/length */
		FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
		/* 1 dword data */
	};

	size = sizeof(klv_entry);

	if (*remain < size) {
		drm_warn(&ads_to_xe(ads)->drm,
			 "w/a klv buffer too small to add klv id %d\n", klv_id);
	} else {
		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
				 klv_entry, size);
		*offset += size;
		*remain -= size;
	}
}

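/*
 * Append a zero-length (flag only) KLV to the w/a KLV region, advancing
 * *offset and shrinking *remain accordingly.
 */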
static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
				    enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
{
	u32 klv_entry[] = {
		/* 16:16 key/length */
		FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		FIELD_PREP(GUC_KLV_0_LEN, 0),
		/* 0 dwords data */
	};
	u32 size;

	size = sizeof(klv_entry);

	if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
		       "w/a klv buffer too small to add klv id %d\n", klv_id))
		return;

	xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
			 klv_entry, size);
	*offset += size;
	*remain -= size;
}

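/*
 * Emit all workaround KLVs that apply to this GT and publish the region's
 * GGTT address and total size in the ADS header for the GuC to consume.
 */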
static void guc_waklv_init(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u64 addr_ggtt;
	u32 offset, remain, size;

	offset = guc_ads_waklv_offset(ads);
	remain = guc_ads_waklv_size(ads);

	if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
					&offset, &remain);
	if (XE_WA(gt, 18024947630))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
					&offset, &remain);
	if (XE_WA(gt, 16022287689))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
					&offset, &remain);

	if (XE_WA(gt, 14022866841))
		guc_waklv_enable_simple(ads,
					GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
					&offset, &remain);

	/*
	 * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
	 * the default value for this register is determined to be 0xC40. This could change in the
	 * future, so GuC depends on KMD to send it the correct value.
	 */
	if (XE_WA(gt, 13011645652))
		guc_waklv_enable_one_word(ads,
					  GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
					  0xC40,
					  &offset, &remain);

	if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
					&offset, &remain);

	if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
		guc_waklv_enable_simple(ads,
					GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
					&offset, &remain);

	size = guc_ads_waklv_size(ads) - remain;
	if (!size)
		return;

	offset = guc_ads_waklv_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_size, size);
}

static int calculate_waklv_size(struct xe_guc_ads *ads)
{
	/*
	 * A single page is both the minimum size possible and is sufficiently
	 * large for all current platforms.
	 */
	return SZ_4K;
}

#define MAX_GOLDEN_LRC_SIZE	(SZ_4K * 64)

int xe_guc_ads_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
	ads->regset_size = calculate_regset_size(gt);
	ads->ads_waklv_size = calculate_waklv_size(ads);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ads->bo = bo;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */

/**
 * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
 * @ads: Additional data structures object
 *
 * Recalculate golden_lrc_size, capture_size and regset_size as the number of
 * hardware engines may have changed after the hwconfig was loaded. Also verify
 * the new sizes fit in the already allocated ADS buffer object.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u32 prev_regset_size = ads->regset_size;

	xe_gt_assert(gt, ads->bo);

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	/* Recalculate the capture size using the worst-case size */
	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
	ads->regset_size = calculate_regset_size(gt);

	xe_gt_assert(gt, ads->golden_lrc_size +
		     (ads->regset_size - prev_regset_size) <=
		     MAX_GOLDEN_LRC_SIZE);

	return 0;
}

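/*
 * Fill in the default scheduler policies; wedged.mode == 2 disables engine
 * reset.
 */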
static void guc_policies_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	u32 global_flags = 0;

	ads_blob_write(ads, policies.dpc_promote_time,
		       GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
	ads_blob_write(ads, policies.max_num_work_items,
		       GLOBAL_POLICY_MAX_NUM_WI);

	if (xe->wedged.mode == 2)
		global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	ads_blob_write(ads, policies.global_flags, global_flags);
	ads_blob_write(ads, policies.is_valid, 1);
}

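/*
 * Report the enabled-engine mask of each engine class in the GT system info.
 */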
static void fill_engine_enable_masks(struct xe_gt *gt,
				     struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);

	info_map_write(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_RENDER));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COPY));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_DECODE));
	info_map_write(xe, info_map,
		       engine_enabled_masks[GUC_VIDEOENHANCE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COMPUTE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}

/*
 * Write the offsets corresponding to the golden LRCs. The actual data is
 * populated later by guc_golden_lrc_populate()
 */
static void guc_golden_lrc_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	size_t alloc_size, real_size;
	u32 addr_ggtt, offset;
	int class;

	offset = guc_ads_golden_lrc_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(class);

		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists LRC registers. This is
		 * required to allow the GuC to restore just the engine state
		 * when a watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
		ads_blob_write(ads, ads.eng_state_size[guc_class],
			       real_size - xe_lrc_skip_size(xe));
		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
			       addr_ggtt);

		addr_ggtt += alloc_size;
	}
}

static void guc_mapping_table_init_invalid(struct xe_gt *gt,
					   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			info_map_write(xe, info_map, mapping_table[i][j],
				       GUC_MAX_INSTANCES_PER_CLASS);
}

static void guc_mapping_table_init(struct xe_gt *gt,
				   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	guc_mapping_table_init_invalid(gt, info_map);

	for_each_hw_engine(hwe, gt, id) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(hwe->class);
		info_map_write(xe, info_map,
			       mapping_table[guc_class][hwe->logical_instance],
			       hwe->instance);
	}
}

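/*
 * Translate a GuC capture list class into the union of the engine enable
 * masks of the engine classes it covers.
 */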
static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map,
				       enum guc_capture_list_class_type capture_class)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 mask;

	switch (capture_class) {
	case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
		mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_VIDEO:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_BLITTER:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
		break;
	default:
		mask = 0;
	}

	return mask;
}

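/*
 * Look up the capture list for (owner, type, class). Returns false when a
 * list was found and fits in the capture region, with *pptr and *size
 * describing it; returns true when the caller should fall back to the empty
 * (null) list.
 */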
static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt,
				    int owner, int type, int class, u32 *total_size, size_t *size,
				    void **pptr)
{
	*size = 0;

	if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) {
		if (*total_size + *size > ads->capture_size)
			xe_gt_dbg(gt, "Capture size overflow :%zu vs %d\n",
				  *total_size + *size, ads->capture_size);
		else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr))
			return false;
	}

	return true;
}

static int guc_capture_prep_lists(struct xe_guc_ads *ads)
{
	struct xe_guc *guc = ads_to_guc(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
	struct iosys_map info_map;
	size_t size = 0;
	void *ptr;
	int i, j;

	/*
	 * GuC Capture's steered reg-list needs to be allocated and initialized
	 * after the GuC-hwconfig is available, which is guaranteed from here.
	 */
	xe_guc_capture_steered_list_init(ads_to_guc(ads));

	capture_offset = guc_ads_capture_offset(ads);
	ads_ggtt = xe_bo_ggtt_addr(ads->bo);
	info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
					 offsetof(struct __guc_ads_blob, system_info));

	/* first, set aside the first page for a capture_list with zero descriptors */
	total_size = PAGE_SIZE;
	if (!xe_guc_capture_getnullheader(guc, &ptr, &size))
		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size);

	null_ggtt = ads_ggtt + capture_offset;
	capture_offset += PAGE_SIZE;

	/*
	 * Populate capture list: at this point the ADS is already allocated
	 * and mapped to worst case size
	 */
	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		bool write_empty_list;

		for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
			u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j);
			/* null list if we don't have said engine or list */
			if (!engine_mask) {
				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
				continue;
			}

			/* engine exists: start with engine-class registers */
			write_empty_list = get_capture_list(ads, guc, gt, i,
							    GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
							    j, &total_size, &size, &ptr);
			if (!write_empty_list) {
				ads_blob_write(ads, ads.capture_class[i][j],
					       ads_ggtt + capture_offset);
				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
						 ptr, size);
				total_size += size;
				capture_offset += size;
			} else {
				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
			}

			/* engine exists: next, engine-instance registers */
			write_empty_list = get_capture_list(ads, guc, gt, i,
							    GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
							    j, &total_size, &size, &ptr);
			if (!write_empty_list) {
				ads_blob_write(ads, ads.capture_instance[i][j],
					       ads_ggtt + capture_offset);
				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
						 ptr, size);
				total_size += size;
				capture_offset += size;
			} else {
				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
			}
		}

		/* global registers are last in our PF/VF loops */
		write_empty_list = get_capture_list(ads, guc, gt, i,
						    GUC_STATE_CAPTURE_TYPE_GLOBAL,
						    0, &total_size, &size, &ptr);
		if (!write_empty_list) {
			ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset);
			xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr,
					 size);
			total_size += size;
			capture_offset += size;
		} else {
			ads_blob_write(ads, ads.capture_global[i], null_ggtt);
		}
	}

	if (ads->capture_size != PAGE_ALIGN(total_size))
		xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
			  PAGE_ALIGN(total_size), ads->capture_size);
	return PAGE_ALIGN(total_size);
}

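/*
 * Encode a single register, including MCR steering information when the
 * register needs it, as a guc_mmio_reg entry at index @n_entry of the regset
 * map.
 */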
static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
				      struct iosys_map *regset_map,
				      struct xe_reg reg,
				      unsigned int n_entry)
{
	struct guc_mmio_reg entry = {
		.offset = reg.addr,
		.flags = reg.masked ? GUC_REGSET_MASKED : 0,
	};

	if (reg.mcr) {
		struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
		u8 group, instance;

		bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
								  &group, &instance);

		if (steer) {
			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
			entry.flags |= GUC_REGSET_STEERING_NEEDED;
		}
	}

	xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
			 &entry, sizeof(entry));
}

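/*
 * Write the register list for one engine: its save/restore entries, the
 * applicable extra registers, and the LNCFCMOCS range on the render engine
 * when WA 1607983814 applies. Returns the number of entries written.
 */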
static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
					  struct iosys_map *regset_map,
					  struct xe_hw_engine *hwe)
{
	struct xe_hw_engine *hwe_rcs_reset_domain =
		xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	unsigned int count = 0;
	const struct {
		struct xe_reg reg;
		bool skip;
	} *e, extra_regs[] = {
		{ .reg = RING_MODE(hwe->mmio_base), },
		{ .reg = RING_HWS_PGA(hwe->mmio_base), },
		{ .reg = RING_IMR(hwe->mmio_base), },
		{ .reg = RCU_MODE, .skip = hwe != hwe_rcs_reset_domain },
		{ .reg = CCS_MODE,
		  .skip = hwe != hwe_rcs_reset_domain || !xe_gt_ccs_mode_enabled(hwe->gt) },
	};
	u32 i;

	BUILD_BUG_ON(ARRAY_SIZE(extra_regs) > ADS_REGSET_EXTRA_MAX);

	xa_for_each(&hwe->reg_sr.xa, idx, entry)
		guc_mmio_regset_write_one(ads, regset_map, entry->reg, count++);

	for (e = extra_regs; e < extra_regs + ARRAY_SIZE(extra_regs); e++) {
		if (e->skip)
			continue;

		guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
	}

	if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
		for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
			guc_mmio_regset_write_one(ads, regset_map,
						  XELP_LNCFCMOCS(i), count++);
		}
	}

	return count;
}

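/*
 * Lay out the per-engine register lists in the regset region and record each
 * list's GGTT address and entry count in ads.reg_state_list.
 */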
static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
{
	size_t regset_offset = guc_ads_regset_offset(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 addr = xe_bo_ggtt_addr(ads->bo) + regset_offset;
	struct iosys_map regset_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
							    regset_offset);
	unsigned int regset_used = 0;

	for_each_hw_engine(hwe, gt, id) {
		unsigned int count;
		u8 gc;

		/*
		 * 1. Write all MMIO entries for this exec queue to the table. No
		 * need to worry about fused-off engines and when there are
		 * entries in the regset: the reg_state_list has been zero'ed
		 * by xe_guc_ads_populate()
		 */
		count = guc_mmio_regset_write(ads, &regset_map, hwe);
		if (!count)
			continue;

		/*
		 * 2. Record in the header (ads.reg_state_list) the address
		 * location and number of entries
		 */
		gc = xe_engine_class_to_guc_class(hwe->class);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].address, addr);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].count, count);

		addr += count * sizeof(struct guc_mmio_reg);
		iosys_map_incr(&regset_map, count * sizeof(struct guc_mmio_reg));

		regset_used += count * sizeof(struct guc_mmio_reg);
	}

	xe_gt_assert(gt, regset_used <= ads->regset_size);
}

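/*
 * For each UM hardware queue, program its device physical and GGTT base
 * addresses and its size, along with the page response timeout.
 */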
static void guc_um_init_params(struct xe_guc_ads *ads)
{
	u32 um_queue_offset = guc_ads_um_queues_offset(ads);
	u64 base_dpa;
	u32 base_ggtt;
	int i;

	base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
	base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;

	for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
		ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
			       base_dpa + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
			       base_ggtt + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
			       GUC_UM_QUEUE_SIZE);
	}

	ads_blob_write(ads, um_init_params.page_response_timeout_in_us,
		       GUC_PAGE_RES_TIMEOUT_US);
}

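/*
 * On integrated platforms with graphics version 12+, read DIST_DBS_POPULATED
 * and report the number of doorbells per SQIDI in the GT system info.
 */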
static void guc_doorbell_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);

	if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
		u32 distdbreg =
			xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED);

		ads_blob_write(ads,
			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
			       REG_FIELD_GET(DOORBELLS_PER_SQIDI_MASK, distdbreg) + 1);
	}
}

/**
 * xe_guc_ads_populate_minimal - populate minimal ADS
 * @ads: Additional data structures object
 *
 * This function populates a minimal ADS that does not support submissions but
 * is enough for the GuC to load and the hwconfig table to be read.
 */
void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
	guc_policies_init(ads);
	guc_golden_lrc_init(ads);
	guc_mapping_table_init_invalid(gt, &info_map);
	guc_doorbell_init(ads);

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

void xe_guc_ads_populate(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
	guc_policies_init(ads);
	fill_engine_enable_masks(gt, &info_map);
	guc_mmio_reg_state_init(ads);
	guc_golden_lrc_init(ads);
	guc_mapping_table_init(gt, &info_map);
	guc_capture_prep_lists(ads);
	guc_doorbell_init(ads);
	guc_waklv_init(ads);

	if (xe->info.has_usm) {
		guc_um_init_params(ads);
		ads_blob_write(ads, ads.um_init_data, base +
			       offsetof(struct __guc_ads_blob, um_init_params));
	}

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

/*
 * After the golden LRCs are recorded for each engine class by the first
 * submission, copy them to the ADS, as initialized earlier by
 * guc_golden_lrc_init().
 */
static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	size_t total_size = 0, alloc_size, real_size;
	u32 offset;
	int class;

	offset = guc_ads_golden_lrc_offset(ads);

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(class);

		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		xe_gt_assert(gt, gt->default_lrc[class]);

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
				 gt->default_lrc[class], real_size);

		offset += alloc_size;
	}

	xe_gt_assert(gt, total_size == ads->golden_lrc_size);
}

void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
	guc_golden_lrc_populate(ads);
}

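/*
 * Send a GLOBAL_SCHED_POLICY_CHANGE H2G action pointing the GuC at the
 * scheduling policies located at @policy_offset.
 */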
static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
{
	struct xe_guc_ct *ct = &ads_to_guc(ads)->ct;
	u32 action[] = {
		XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
}

/**
 * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
 * @ads: Additional data structures object
 *
 * This function updates the GuC's engine reset policy based on wedged.mode.
 *
 * Return: 0 on success, and negative error code otherwise.
 */
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
	struct guc_policies *policies;
	struct xe_guc *guc = ads_to_guc(ads);
	struct xe_device *xe = ads_to_xe(ads);
	CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	policies = xe_guc_buf_cpu_ptr(buf);
	memset(policies, 0, sizeof(*policies));

	policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
	policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
	policies->is_valid = 1;
	if (xe->wedged.mode == 2)
		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
	else
		policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
}