// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/string_choices.h>
#include <linux/wordpart.h>

#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"

#include "regs/xe_guc_regs.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_klv_thresholds_set.h"
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"

#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
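/* e.g. make_u64_from_u32(0x11, 0x22) == 0x0000001100000022ull */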

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
				    u64 addr, u32 size)
{
	u32 request[] = {
		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
		vfid,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/*
 * Return: 0 on success, negative error code on failure.
 */
static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
{
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);

	return ret <= 0 ? ret : -EPROTO;
}

/*
 * Return: number of KLVs that were successfully parsed and saved,
 *         negative error code on failure.
 */
static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}

/*
 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
 *         negative error code on failure.
 */
static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       struct xe_guc_buf buf, u32 num_dwords)
{
	int ret;

	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);

	if (ret != num_klvs) {
		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		struct drm_printer p = xe_gt_info_printer(gt);
		char name[8];

		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
				   xe_sriov_function_name(vfid, name, sizeof(name)),
				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
		xe_guc_klv_print(klvs, num_dwords, &p);
		return err;
	}

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);
		void *klvs = xe_guc_buf_cpu_ptr(buf);
		char name[8];

		xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
				 xe_sriov_function_name(vfid, name, sizeof(name)),
				 num_klvs, str_plural(num_klvs));
		xe_guc_klv_print(klvs, num_dwords, &p);
	}

	return 0;
}

/*
 * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
 *         negative error code on failure.
 */
static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
}

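/* Push a single KLV with a 32-bit value (1 dword of payload) for this VF. */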
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

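/* Push a single KLV with a 64-bit value (2 dwords of payload) for this VF. */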
static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
{
	u32 klv[] = {
		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
		lower_32_bits(value),
		upper_32_bits(value),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
}

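/* Push the VF's GGTT range as a pair of KLVs: GGTT_START and GGTT_SIZE. */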
static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
		lower_32_bits(start),
		upper_32_bits(start),
		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
		lower_32_bits(size),
		upper_32_bits(size),
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

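/* Push the VF's GuC context ID range as BEGIN_CONTEXT_ID and NUM_CONTEXTS KLVs. */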
static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

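/* Push the VF's GuC doorbell ID range as BEGIN_DOORBELL_ID and NUM_DOORBELLS KLVs. */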
static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
{
	u32 klvs[] = {
		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
		begin,
		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
		num,
	};

	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
}

static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
{
	/* GuC will silently clamp values exceeding max */
	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
}

static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
{
	/* GuC will silently clamp values exceeding max */
	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);

	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}

static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
}

static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
}

static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
				    enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);

	xe_gt_assert(gt, key);
	return pf_push_vf_cfg_u32(gt, vfid, key, value);
}

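/* Return: the &xe_gt_sriov_config of the VF (or of the PF for vfid == 0); caller must hold the PF master mutex. */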
static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].config;
}

/* Return: number of configuration dwords written */
static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		if (details) {
			cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
			cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
			cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
		}

		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
		cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
		cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
	}

	return n;
}

/* Return: number of configuration dwords written */
static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
	u32 n = 0;

	n += encode_config_ggtt(cfg, config, details);

	if (details && config->num_ctxs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
		cfg[n++] = config->begin_ctx;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
	cfg[n++] = config->num_ctxs;

	if (details && config->num_dbs) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
		cfg[n++] = config->begin_db;
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
	cfg[n++] = config->num_dbs;

	if (config->lmem_obj) {
		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
		cfg[n++] = lower_32_bits(config->lmem_obj->size);
		cfg[n++] = upper_32_bits(config->lmem_obj->size);
	}

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
	cfg[n++] = config->exec_quantum;

	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
	cfg[n++] = config->preempt_timeout;

#define encode_threshold_config(TAG, ...) ({					\
	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config

	return n;
}

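/* Encode the complete VF config as KLVs and push it to the GuC in one buffer. */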
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
	u32 num_dwords;
	int num_klvs;
	u32 *cfg;
	int err;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cfg = xe_guc_buf_cpu_ptr(buf);
	num_dwords = encode_config(cfg, config, true);
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	if (xe_gt_is_media_type(gt)) {
		struct xe_gt *primary = gt->tile->primary_gt;
		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);

		/* media-GT will never include a GGTT config */
		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));

		/* the GGTT config must be taken from the primary-GT instead */
		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
	}
	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);

	num_klvs = xe_guc_klv_count(cfg, num_dwords);
	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);

	return err;
}

static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
{
	int err = 0;

	xe_gt_assert(gt, vfid);
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (reset)
		err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err)
		err = pf_push_full_vf_config(gt, vfid);

	return err;
}

static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
{
	return pf_push_vf_cfg(gt, vfid, true);
}

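/* Return: required GGTT allocation granularity (64K on dGFX that needs 64K pages, else 4K). */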
static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
}

static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		pf_get_ggtt_alignment(gt) : SZ_64M;
}

static u64 pf_get_spare_ggtt(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.ggtt_size;
	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));

	return spare;
}

static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_ggtt(gt))
		return -EINVAL;

	size = round_up(size, pf_get_ggtt_alignment(gt));
	gt->sriov.pf.spare.ggtt_size = size;

	return 0;
}

static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
{
	int err, err2 = 0;

	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);

	if (tile->media_gt && !err)
		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);

	return err ?: err2;
}

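/* Remove the VF's GGTT node if it was allocated, otherwise just free it. */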
static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
{
	if (xe_ggtt_node_allocated(node)) {
		/*
		 * An explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
		 * is redundant, as the PTEs will be implicitly re-assigned to the
		 * PF by the xe_ggtt_clear() called from xe_ggtt_node_remove() below.
		 */
		xe_ggtt_node_remove(node, false);
	} else {
		xe_ggtt_node_fini(node);
	}
}

static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
	config->ggtt_region = NULL;
}

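/*
 * Provision the VF with a fresh GGTT region of the requested size:
 * any previous allocation is unprovisioned from the GuC and released first.
 */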
static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node;
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_ggtt *ggtt = tile->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	size = round_up(size, alignment);

	if (xe_ggtt_node_allocated(config->ggtt_region)) {
		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
		if (unlikely(err))
			return err;

		pf_release_vf_config_ggtt(gt, config);

		err = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(err))
			return err;
	}
	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));

	if (!size)
		return 0;

	node = xe_ggtt_node_init(ggtt);
	if (IS_ERR(node))
		return PTR_ERR(node);

	err = xe_ggtt_node_insert(node, size, alignment);
	if (unlikely(err))
		goto err;

	xe_ggtt_assign(node, vfid);
	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
				vfid, node->base.start, node->base.start + node->base.size - 1);

	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
	if (unlikely(err))
		goto err;

	config->ggtt_region = node;
	return 0;
err:
	pf_release_ggtt(tile, node);
	return err;
}

static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_ggtt_node *node = config->ggtt_region;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
}

/**
 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
 */
u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
	else
		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
				  u64 actual, const char *what, int err)
{
	char size[10];
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
				   name, value, size, what, ERR_PTR(err));
		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
				 name, actual, size, what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
			 name, actual, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested GGTT size
 *
 * If &vfid represents a PF, then this function will change the PF's spare GGTT config.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	int err;

	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ggtt(gt, vfid, size);
	else
		err = pf_set_spare_ggtt(gt, size);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u64_done(gt, vfid, size,
				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
				      vfid ? "GGTT" : "spare GGTT", err);
}

static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
				       const char *what, unsigned int last, int err)
{
	char size[10];

	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u64_done(gt, first, last - first, value,
						    get, what, last, 0);
		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
			 first, first + num_vfs - 1, value, size, what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @size: requested GGTT size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u64 size)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ggtt(gt, n, size);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
					   xe_gt_sriov_pf_config_get_ggtt,
					   "GGTT", n, err);
}

/* Return: size of the largest contiguous GGTT region */
static u64 pf_get_max_ggtt(struct xe_gt *gt)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare = pf_get_spare_ggtt(gt);
	u64 max_hole;

	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);

	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
				max_hole / SZ_1K, spare / SZ_1K);
	return max_hole > spare ? max_hole - spare : 0;
}

static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
{
	u64 available = pf_get_max_ggtt(gt);
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 fair;

	/*
	 * To simplify the logic we only look at single largest GGTT region
	 * as that will be always the best fit for 1 VF case, and most likely
	 * will also nicely cover other cases where VFs are provisioned on the
	 * fresh and idle PF driver, without any stale GGTT allocations spread
	 * in the middle of the full GGTT range.
	 */

	fair = div_u64(available, num_vfs);
	fair = ALIGN_DOWN(fair, alignment);
	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
				available / SZ_1K, num_vfs, fair / SZ_1K);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
 * @gt: the &xe_gt (can't be media)
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u64 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ggtt(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
	/* XXX: preliminary */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
		hweight64(gt->info.engine_mask) : SZ_256;
}

static u32 pf_get_spare_ctxs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_ctxs;
	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));

	return spare;
}

static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_ID_MAX)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_ctxs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_ctxs = spare;

	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	unsigned int spare = pf_get_spare_ctxs(gt);

	return xe_guc_id_mgr_reserve(idm, num, spare);
}

static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;

	if (num)
		xe_guc_id_mgr_release(idm, start, num);
}

static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
	config->begin_ctx = 0;
	config->num_ctxs = 0;
}

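/*
 * Provision the VF with a range of GuC context IDs: any previously
 * reserved range is unprovisioned from the GuC and released first.
 */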
static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_ctxs > GUC_ID_MAX)
		return -EINVAL;

	if (config->num_ctxs) {
		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_ctxs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_ctxs)
		return 0;

	ret = pf_reserve_ctxs(gt, num_ctxs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_ctx = ret;
	config->num_ctxs = num_ctxs;

	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
	if (unlikely(ret)) {
		pf_release_config_ctxs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
	return 0;
}

static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_ctxs;
}

/**
 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF, then the number of PF's spare GuC context IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_ctxs;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	else
		num_ctxs = pf_get_spare_ctxs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_ctxs;
}

static const char *no_unit(u32 unused)
{
	return "";
}

static const char *spare_unit(u32 unused)
{
	return " spare";
}

static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
				  const char *what, const char *(*unit)(u32), int err)
{
	char name[8];

	xe_sriov_function_name(vfid, name, sizeof(name));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
				   name, value, unit(value), what, ERR_PTR(err));
		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
				 name, actual, unit(actual), what);
		return err;
	}

	/* the actual value may have changed during provisioning */
	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
			 name, actual, unit(actual), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
	else
		err = pf_set_spare_ctxs(gt, num_ctxs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_ctxs,
				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
}

static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
				       const char *what, const char *(*unit)(u32),
				       unsigned int last, int err)
{
	xe_gt_assert(gt, first);
	xe_gt_assert(gt, num_vfs);
	xe_gt_assert(gt, first <= last);

	if (num_vfs == 1)
		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
				   first, first + num_vfs - 1, what);
		if (last > first)
			pf_config_bulk_set_u32_done(gt, first, last - first, value,
						    get, what, unit, last, 0);
		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
	}

	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
	value = get(gt, first);
	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
			 first, first + num_vfs - 1, value, unit(value), what);
	return 0;
}

/**
 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier
 * @num_vfs: number of VFs to provision
 * @num_ctxs: requested number of GuC context IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs, u32 num_ctxs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
					   xe_gt_sriov_pf_config_get_ctxs,
					   "GuC context IDs", no_unit, n, err);
}

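/*
 * Estimate the largest per-VF context ID quota that can still be reserved
 * for all VFs at once, by probing the ID manager with decreasing candidates.
 */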
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
	u32 spare = pf_get_spare_ctxs(gt);
	u32 fair = (idm->total - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
					unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_ctxs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}

static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
{
	/* XXX: preliminary, we don't use doorbells yet! */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
}

static u32 pf_get_spare_dbs(struct xe_gt *gt)
{
	u32 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.num_dbs;
	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));

	return spare;
}

static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (spare > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (spare && spare < pf_get_min_spare_dbs(gt))
		return -EINVAL;

	gt->sriov.pf.spare.num_dbs = spare;
	return 0;
}

/* Return: start ID or negative error code on failure */
static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	unsigned int spare = pf_get_spare_dbs(gt);

	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
}

static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;

	if (num)
		xe_guc_db_mgr_release_range(dbm, start, num);
}

static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	pf_release_dbs(gt, config->begin_db, config->num_dbs);
	config->begin_db = 0;
	config->num_dbs = 0;
}

static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int ret;

	xe_gt_assert(gt, vfid);

	if (num_dbs > GUC_NUM_DOORBELLS)
		return -EINVAL;

	if (config->num_dbs) {
		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
		if (unlikely(ret))
			return ret;

		pf_release_config_dbs(gt, config);

		ret = pf_refresh_vf_cfg(gt, vfid);
		if (unlikely(ret))
			return ret;
	}

	if (!num_dbs)
		return 0;

	ret = pf_reserve_dbs(gt, num_dbs);
	if (unlikely(ret < 0))
		return ret;

	config->begin_db = ret;
	config->num_dbs = num_dbs;

	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
	if (unlikely(ret)) {
		pf_release_config_dbs(gt, config);
		return ret;
	}

	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
	return 0;
}

static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->num_dbs;
}

/**
 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 * If &vfid represents a PF, then the number of PF's spare GuC doorbell IDs is returned.
 *
 * Return: VF's quota (or PF's spare).
 */
u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
{
	u32 num_dbs;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		num_dbs = pf_get_vf_config_dbs(gt, vfid);
	else
		num_dbs = pf_get_spare_dbs(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return num_dbs;
}

/**
 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
	else
		err = pf_set_spare_dbs(gt, num_dbs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, num_dbs,
				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
}

/**
 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision
 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs, u32 num_dbs)
{
	unsigned int n;
	int err = 0;

	xe_gt_assert(gt, vfid);

	if (!num_vfs)
		return 0;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = vfid; n < vfid + num_vfs; n++) {
		err = pf_provision_vf_dbs(gt, n, num_dbs);
		if (err)
			break;
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
					   xe_gt_sriov_pf_config_get_dbs,
					   "GuC doorbell IDs", no_unit, n, err);
}

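/*
 * Estimate the largest per-VF doorbell quota that can still be reserved
 * for all VFs at once, by probing the doorbell manager with decreasing candidates.
 */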
static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
	u32 spare = pf_get_spare_dbs(gt);
	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
	int ret;

	for (; fair; --fair) {
		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
		if (ret < 0)
			continue;
		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
		break;
	}

	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
	return fair;
}

/**
 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
 * @gt: the &xe_gt
 * @vfid: starting VF identifier (can't be 0)
 * @num_vfs: number of VFs to provision (can't be 0)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
				       unsigned int num_vfs)
{
	u32 fair;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, num_vfs);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	fair = pf_estimate_fair_dbs(gt, num_vfs);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (!fair)
		return -ENOSPC;

	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}

static u64 pf_get_lmem_alignment(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_2M;
}

static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
{
	/* this might be platform dependent */
	return SZ_128M; /* XXX: preliminary */
}

static u64 pf_get_spare_lmem(struct xe_gt *gt)
{
	u64 spare;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	spare = gt->sriov.pf.spare.lmem_size;
	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));

	return spare;
}

static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (size && size < pf_get_min_spare_lmem(gt))
		return -EINVAL;

	gt->sriov.pf.spare.lmem_size = size;
	return 0;
}

static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_bo *bo;

	bo = config->lmem_obj;
	return bo ? bo->size : 0;
}

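/* Distribute the VF's LMEM configuration to the GuC across all tiles. */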
static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile;
	unsigned int tid;
	int err;

	for_each_tile(tile, xe, tid) {
		if (tile->primary_gt == gt) {
			err = pf_push_vf_cfg_lmem(gt, vfid, size);
		} else {
			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);

			if (!lmem)
				continue;
			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
		}
		if (unlikely(err))
			return err;
	}
	return 0;
}

static void pf_force_lmtt_invalidate(struct xe_device *xe)
{
	/* TODO */
}

static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_lmtt *lmtt;
	struct xe_tile *tile;
	unsigned int tid;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
}

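/*
 * Rebuild the VF's LMTT on every tile, mapping the VF's LMEM objects
 * from all tiles into one contiguous range.
 */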
static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
{
	struct xe_gt_sriov_config *config;
	struct xe_tile *tile;
	struct xe_lmtt *lmtt;
	struct xe_bo *bo;
	struct xe_gt *gt;
	u64 total, offset;
	unsigned int gtid;
	unsigned int tid;
	int err;

	xe_assert(xe, xe_device_has_lmtt(xe));
	xe_assert(xe, IS_SRIOV_PF(xe));

	total = 0;
	for_each_tile(tile, xe, tid)
		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);

	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;

		xe_lmtt_drop_pages(lmtt, vfid);
		if (!total)
			continue;

		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
		if (err)
			goto fail;

		offset = 0;
		for_each_gt(gt, xe, gtid) {
			if (xe_gt_is_media_type(gt))
				continue;

			config = pf_pick_vf_config(gt, vfid);
			bo = config->lmem_obj;
			if (!bo)
				continue;

			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
			if (err)
				goto fail;
			offset += bo->size;
		}
	}

	pf_force_lmtt_invalidate(xe);
	return 0;

fail:
	for_each_tile(tile, xe, tid) {
		lmtt = &tile->sriov.pf.lmtt;
		xe_lmtt_drop_pages(lmtt, vfid);
	}
	return err;
}

static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (config->lmem_obj) {
		xe_bo_unpin_map_no_vm(config->lmem_obj);
		config->lmem_obj = NULL;
	}
}

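/*
 * Provision the VF with a pinned LMEM object of the requested size:
 * any previous allocation is unprovisioned and released first.
 */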
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;
	int err;

	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, IS_DGFX(xe));
	xe_gt_assert(gt, !xe_gt_is_media_type(gt));

	size = round_up(size, pf_get_lmem_alignment(gt));

	if (config->lmem_obj) {
		err = pf_distribute_config_lmem(gt, vfid, 0);
		if (unlikely(err))
			return err;

		if (xe_device_has_lmtt(xe))
			pf_reset_vf_lmtt(xe, vfid);
		pf_release_vf_config_lmem(gt, config);
	}
	xe_gt_assert(gt, !config->lmem_obj);

	if (!size)
		return 0;

	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_NEEDS_2M |
				  XE_BO_FLAG_PINNED);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	config->lmem_obj = bo;

	if (xe_device_has_lmtt(xe)) {
		err = pf_update_vf_lmtt(xe, vfid);
		if (unlikely(err))
			goto release;
	}

	err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
	if (unlikely(err))
		goto reset_lmtt;

	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
				vfid, bo->size, bo->size / SZ_1M);
	return 0;

reset_lmtt:
	if (xe_device_has_lmtt(xe))
		pf_reset_vf_lmtt(xe, vfid);
release:
	pf_release_vf_config_lmem(gt, config);
	return err;
}

/**
 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's spare) LMEM quota.
 */
u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
{
	u64 size;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (vfid)
		size = pf_get_vf_config_lmem(gt, vfid);
	else
		size = pf_get_spare_lmem(gt);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return size;
}

/**
 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
 * @gt: the &xe_gt (can't be media)
 * @vfid: the VF identifier
 * @size: requested LMEM size
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
xe_gt_sriov_pf_config_set_lmem(struct xe_gt * gt,unsigned int vfid,u64 size)1511 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1512 {
1513 int err;
1514
1515 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1516 if (vfid)
1517 err = pf_provision_vf_lmem(gt, vfid, size);
1518 else
1519 err = pf_set_spare_lmem(gt, size);
1520 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1521
1522 return pf_config_set_u64_done(gt, vfid, size,
1523 xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1524 vfid ? "LMEM" : "spare LMEM", err);
1525 }
1526
1527 /**
1528 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1529 * @gt: the &xe_gt (can't be media)
1530 * @vfid: starting VF identifier (can't be 0)
1531 * @num_vfs: number of VFs to provision
1532 * @size: requested LMEM size
1533 *
1534 * This function can only be called on PF.
1535 *
1536 * Return: 0 on success or a negative error code on failure.
1537 */
xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt * gt,unsigned int vfid,unsigned int num_vfs,u64 size)1538 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1539 unsigned int num_vfs, u64 size)
1540 {
1541 unsigned int n;
1542 int err = 0;
1543
1544 xe_gt_assert(gt, vfid);
1545 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1546
1547 if (!num_vfs)
1548 return 0;
1549
1550 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1551 for (n = vfid; n < vfid + num_vfs; n++) {
1552 err = pf_provision_vf_lmem(gt, n, size);
1553 if (err)
1554 break;
1555 }
1556 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1557
1558 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1559 xe_gt_sriov_pf_config_get_lmem,
1560 "LMEM", n, err);
1561 }
1562
pf_query_free_lmem(struct xe_gt * gt)1563 static u64 pf_query_free_lmem(struct xe_gt *gt)
1564 {
1565 struct xe_tile *tile = gt->tile;
1566
1567 return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
1568 }
1569
pf_query_max_lmem(struct xe_gt * gt)1570 static u64 pf_query_max_lmem(struct xe_gt *gt)
1571 {
1572 u64 alignment = pf_get_lmem_alignment(gt);
1573 u64 spare = pf_get_spare_lmem(gt);
1574 u64 free = pf_query_free_lmem(gt);
1575 u64 avail;
1576
1577 /* XXX: need to account for 2MB blocks only */
1578 avail = free > spare ? free - spare : 0;
1579 avail = round_down(avail, alignment);
1580
1581 return avail;
1582 }
1583
1584 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1585 #define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
1586 #endif
1587
pf_estimate_fair_lmem(struct xe_gt * gt,unsigned int num_vfs)1588 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1589 {
1590 u64 available = pf_query_max_lmem(gt);
1591 u64 alignment = pf_get_lmem_alignment(gt);
1592 u64 fair;
1593
1594 fair = div_u64(available, num_vfs);
1595 fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
1596 fair = ALIGN_DOWN(fair, alignment);
1597 #ifdef MAX_FAIR_LMEM
1598 fair = min_t(u64, MAX_FAIR_LMEM, fair);
1599 #endif
1600 xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1601 available / SZ_1M, num_vfs, fair / SZ_1M);
1602 return fair;
1603 }
1604
1605 /**
1606 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1607 * @gt: the &xe_gt (can't be media)
1608 * @vfid: starting VF identifier (can't be 0)
1609 * @num_vfs: number of VFs to provision (can't be 0)
1610 *
1611 * This function can only be called on PF.
1612 *
1613 * Return: 0 on success or a negative error code on failure.
1614 */
xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt * gt,unsigned int vfid,unsigned int num_vfs)1615 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1616 unsigned int num_vfs)
1617 {
1618 u64 fair;
1619
1620 xe_gt_assert(gt, vfid);
1621 xe_gt_assert(gt, num_vfs);
1622 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1623
1624 if (!IS_DGFX(gt_to_xe(gt)))
1625 return 0;
1626
1627 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1628 fair = pf_estimate_fair_lmem(gt, num_vfs);
1629 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1630
1631 if (!fair)
1632 return -ENOSPC;
1633
1634 return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1635 }
1636
1637 /**
1638 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1639 * @gt: the &xe_gt
1640 * @vfid: starting VF identifier (can't be 0)
1641 * @num_vfs: number of VFs to provision (can't be 0)
1642 *
1643 * This function can only be called on PF.
1644 *
1645 * Return: 0 on success or a negative error code on failure.
1646 */
xe_gt_sriov_pf_config_set_fair(struct xe_gt * gt,unsigned int vfid,unsigned int num_vfs)1647 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1648 unsigned int num_vfs)
1649 {
1650 int result = 0;
1651 int err;
1652
1653 xe_gt_assert(gt, vfid);
1654 xe_gt_assert(gt, num_vfs);
1655
1656 if (!xe_gt_is_media_type(gt)) {
1657 err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1658 result = result ?: err;
1659 err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1660 result = result ?: err;
1661 }
1662 err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1663 result = result ?: err;
1664 err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1665 result = result ?: err;
1666
1667 return result;
1668 }
1669
exec_quantum_unit(u32 exec_quantum)1670 static const char *exec_quantum_unit(u32 exec_quantum)
1671 {
1672 return exec_quantum ? "ms" : "(infinity)";
1673 }
1674
pf_provision_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1675 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1676 u32 exec_quantum)
1677 {
1678 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1679 int err;
1680
1681 err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1682 if (unlikely(err))
1683 return err;
1684
1685 config->exec_quantum = exec_quantum;
1686 return 0;
1687 }
1688
pf_get_exec_quantum(struct xe_gt * gt,unsigned int vfid)1689 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1690 {
1691 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1692
1693 return config->exec_quantum;
1694 }
1695
1696 /**
1697 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1698 * @gt: the &xe_gt
1699 * @vfid: the VF identifier
1700 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1701 *
1702 * This function can only be called on PF.
1703 *
1704 * Return: 0 on success or a negative error code on failure.
1705 */
xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt * gt,unsigned int vfid,u32 exec_quantum)1706 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1707 u32 exec_quantum)
1708 {
1709 int err;
1710
1711 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1712 err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1713 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1714
1715 return pf_config_set_u32_done(gt, vfid, exec_quantum,
1716 xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1717 "execution quantum", exec_quantum_unit, err);
1718 }
1719
/**
 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) execution quantum in milliseconds.
 */
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
	u32 exec_quantum;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	exec_quantum = pf_get_exec_quantum(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return exec_quantum;
}

static const char *preempt_timeout_unit(u32 preempt_timeout)
{
	return preempt_timeout ? "us" : "(infinity)";
}

static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					u32 preempt_timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
	if (unlikely(err))
		return err;

	config->preempt_timeout = preempt_timeout;

	return 0;
}

static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->preempt_timeout;
}

/**
 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
					      u32 preempt_timeout)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
				      "preemption timeout", preempt_timeout_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) preemption timeout in microseconds.
 */
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
	u32 preempt_timeout;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return preempt_timeout;
}

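/*
 * Example (illustrative sketch): the preemption timeout is typically tuned
 * together with the execution quantum. Mind the different units: the quantum
 * is in milliseconds, the timeout in microseconds. Values are arbitrary:
 *
 *	err = xe_gt_sriov_pf_config_set_exec_quantum(gt, 1, 25);	// 25 ms
 *	err = err ?: xe_gt_sriov_pf_config_set_preempt_timeout(gt, 1,
 *							       500000);	// 500 ms
 */
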
static const char *sched_priority_unit(u32 priority)
{
	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
	       priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
	       priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
	       "(?)";
}

static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
	if (unlikely(err))
		return err;

	config->sched_priority = priority;
	return 0;
}

static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->sched_priority;
}

/**
 * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @priority: requested scheduling priority
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
{
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_sched_priority(gt, vfid, priority);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, priority,
				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
				      "scheduling priority", sched_priority_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 *
 * Return: VF's (or PF's) scheduling priority.
 */
u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
{
	u32 priority;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	priority = pf_get_sched_priority(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return priority;
}

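/*
 * Example (illustrative sketch): raising VF1 above the default scheduling
 * priority, using the GUC_SCHED_PRIORITY_* values from the GuC firmware ABI:
 *
 *	err = xe_gt_sriov_pf_config_set_sched_priority(gt, 1,
 *						       GUC_SCHED_PRIORITY_HIGH);
 *	if (!err)
 *		xe_gt_assert(gt, xe_gt_sriov_pf_config_get_sched_priority(gt, 1) ==
 *				 GUC_SCHED_PRIORITY_HIGH);
 */
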
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	config->exec_quantum = 0;
	config->preempt_timeout = 0;
}

static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
				  enum xe_guc_klv_threshold_index index, u32 value)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	int err;

	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
	if (unlikely(err))
		return err;

	config->thresholds[index] = value;

	return 0;
}

static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
			    enum xe_guc_klv_threshold_index index)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);

	return config->thresholds[index];
}

static const char *threshold_unit(u32 threshold)
{
	return threshold ? "" : "(disabled)";
}

/**
 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 * @value: requested value (0 means disabled)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index, u32 value)
{
	u32 key = xe_guc_klv_threshold_index_to_key(index);
	const char *name = xe_guc_klv_key_to_string(key);
	int err;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_threshold(gt, vfid, index, value);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return pf_config_set_u32_done(gt, vfid, value,
				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
				      name, threshold_unit, err);
}

/**
 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @index: the threshold index
 *
 * This function can only be called on PF.
 *
 * Return: value of VF's (or PF's) threshold.
 */
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
					enum xe_guc_klv_threshold_index index)
{
	u32 value;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	value = pf_get_threshold(gt, vfid, index);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return value;
}

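/*
 * Example (illustrative sketch): arming one of the adverse-event thresholds
 * for VF1. The &enum xe_guc_klv_threshold_index values are generated from
 * the thresholds set in xe_guc_klv_thresholds_set.h; the index name below
 * is assumed for illustration only:
 *
 *	err = xe_gt_sriov_pf_config_set_threshold(gt, 1,
 *						  XE_GUC_KLV_THRESHOLD_INDEX_CAT_ERR,
 *						  10);
 *	...
 *	value = xe_gt_sriov_pf_config_get_threshold(gt, 1,
 *						    XE_GUC_KLV_THRESHOLD_INDEX_CAT_ERR);
 */
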
static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

#define reset_threshold_config(TAG, ...) ({					\
	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;		\
});

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
#undef reset_threshold_config
}

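/*
 * The reset helper above relies on the X-macro pattern: the thresholds set
 * header expands MAKE_XE_GUC_KLV_THRESHOLDS_SET(define) into one
 * define(TAG, ...) invocation per threshold, so passing a local macro stamps
 * out one statement per entry. Roughly (sketch, tag names abbreviated):
 *
 *	config->thresholds[XE_GUC_KLV_THRESHOLD_INDEX_CAT_ERR] = 0;
 *	config->thresholds[XE_GUC_KLV_THRESHOLD_INDEX_ENGINE_RESET] = 0;
 *	// ...one assignment per threshold tag in the set...
 */
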
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_device *xe = gt_to_xe(gt);

	if (!xe_gt_is_media_type(gt)) {
		pf_release_vf_config_ggtt(gt, config);
		if (IS_DGFX(xe)) {
			pf_release_vf_config_lmem(gt, config);
			if (xe_device_has_lmtt(xe))
				pf_update_vf_lmtt(xe, vfid);
		}
	}
	pf_release_config_ctxs(gt, config);
	pf_release_config_dbs(gt, config);
	pf_reset_config_sched(gt, config);
	pf_reset_config_thresholds(gt, config);
}

/**
 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @force: force configuration release
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
{
	int err;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err || force)
		pf_release_vf_config(gt, vfid);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
				   vfid, ERR_PTR(err),
				   force ? " but all resources were released anyway!" : "");
	}

	return force ? 0 : err;
}

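/*
 * Example (illustrative sketch): unprovisioning VF1 once it is disabled.
 * Passing force=true releases the PF-side resources even if the GuC reset
 * request fails (e.g. in teardown paths), and then always returns 0:
 *
 *	err = xe_gt_sriov_pf_config_release(gt, 1, true);
 */
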
static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
{
	if (xe_ggtt_node_allocated(ggtt_region))
		xe_ggtt_assign(ggtt_region, vfid);
}

static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
{
	struct xe_migrate *m = tile->migrate;
	struct dma_fence *fence;
	int err;

	if (!bo)
		return 0;

	xe_bo_lock(bo, false);
	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
	} else if (!fence) {
		err = -ENOMEM;
	} else {
		long ret = dma_fence_wait_timeout(fence, false, timeout);

		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
		dma_fence_put(fence);
		if (!err)
			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
						jiffies_to_msecs(timeout - ret));
	}
	xe_bo_unlock(bo);

	return err;
}

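/*
 * Note on the error ladder above: dma_fence_wait_timeout() returns the
 * remaining jiffies on success, 0 on timeout, and a negative error code on
 * failure, hence the mapping:
 *
 *	err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
 */
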
static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
{
	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	/*
	 * Only GGTT and LMEM need to be cleared by the PF.
	 * GuC doorbell IDs and context IDs do not need any clearing.
	 */
	if (!xe_gt_is_media_type(gt)) {
		pf_sanitize_ggtt(config->ggtt_region, vfid);
		if (IS_DGFX(xe))
			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
	}

	return err;
}

/**
 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @timeout: maximum timeout to wait for completion in jiffies
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
{
	int err;

	xe_gt_assert(gt, vfid != PFID);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_sanitize_vf_resources(gt, vfid, timeout);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err))
		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
				   vfid, ERR_PTR(err));
	return err;
}

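/*
 * Example (illustrative sketch): scrubbing VF1 resources before handing
 * them to a different VM, allowing up to five seconds for the LMEM clear:
 *
 *	err = xe_gt_sriov_pf_config_sanitize(gt, 1, 5 * HZ);
 */
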
/**
 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @refresh: explicit refresh
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
{
	int err = 0;

	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_push_vf_cfg(gt, vfid, refresh);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
	}

	return err;
}

static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool is_primary = !xe_gt_is_media_type(gt);
	bool valid_ggtt, valid_ctxs, valid_dbs;
	bool valid_any, valid_all;

	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
	valid_dbs = pf_get_vf_config_dbs(gt, vfid);

	/* note that GuC doorbells are optional */
	valid_any = valid_ctxs || valid_dbs;
	valid_all = valid_ctxs;

	/* and GGTT/LMEM is configured on primary GT only */
	valid_all = valid_all && valid_ggtt;
	valid_any = valid_any || (valid_ggtt && is_primary);

	if (IS_DGFX(xe)) {
		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);

		valid_any = valid_any || (valid_lmem && is_primary);
		valid_all = valid_all && valid_lmem;
	}

	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
}

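/*
 * The validation above distinguishes three states per VF:
 *
 *	 0		all mandatory resources provisioned (GGTT, contexts
 *			and, on DGFX, LMEM; doorbells are optional)
 *	-ENOKEY		partially provisioned (some, but not all, resources)
 *	-ENODATA	nothing provisioned at all
 *
 * which lets callers treat -ENODATA as "empty" (see below) and -ENOKEY as a
 * broken, partially provisioned configuration.
 */
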
/**
 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 *
 * This function can only be called on PF.
 *
 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
 */
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
{
	bool empty;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return empty;
}

/**
 * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @buf: the buffer to save a config to (or NULL to query the buf size)
 * @size: the size of the buffer (or 0 to query the buf size)
 *
 * This function can only be called on PF.
 *
 * Return: minimum size of the buffer or the number of bytes saved,
 *         or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
{
	struct xe_gt_sriov_config *config;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);
	xe_gt_assert(gt, !(!buf ^ !size));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	ret = pf_validate_vf_config(gt, vfid);
	if (!size) {
		ret = ret ? 0 : SZ_4K;
	} else if (!ret) {
		if (size < SZ_4K) {
			ret = -ENOBUFS;
		} else {
			config = pf_pick_vf_config(gt, vfid);
			ret = encode_config(buf, config, false) * sizeof(u32);
		}
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return ret;
}

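/*
 * Example (illustrative sketch): the save API follows the usual
 * query-then-fill pattern, first called with NULL/0 to learn the required
 * buffer size, then again with a caller-owned buffer:
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, 1, NULL, 0);
 *
 *	if (size > 0) {
 *		void *blob = kzalloc(size, GFP_KERNEL);
 *
 *		if (blob) {
 *			ssize_t saved = xe_gt_sriov_pf_config_save(gt, 1,
 *								   blob, size);
 *			// saved is the number of bytes written, or an error
 *		}
 *	}
 */
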
static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
				    u32 key, u32 len, const u32 *value)
{
	switch (key) {
	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
			return -EBADMSG;
		return pf_provision_vf_ctxs(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
			return -EBADMSG;
		return pf_provision_vf_dbs(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
			return -EBADMSG;
		return pf_provision_exec_quantum(gt, vfid, value[0]);

	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
			return -EBADMSG;
		return pf_provision_preempt_timeout(gt, vfid, value[0]);

	/* auto-generate case statements */
#define define_threshold_key_to_provision_case(TAG, ...)			\
	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):				\
		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);	\
		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))		\
			return -EBADMSG;					\
		return pf_provision_threshold(gt, vfid,				\
					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \
					      value[0]);

	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
#undef define_threshold_key_to_provision_case
	}

	if (xe_gt_is_media_type(gt))
		return -EKEYREJECTED;

	switch (key) {
	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
			return -EBADMSG;
		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));

	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
		if (!IS_DGFX(gt_to_xe(gt)))
			return -EKEYREJECTED;
		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
			return -EBADMSG;
		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
	}

	return -EKEYREJECTED;
}

static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
				const u32 *klvs, size_t num_dwords)
{
	int err;

	while (num_dwords >= GUC_KLV_LEN_MIN) {
		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);

		klvs += GUC_KLV_LEN_MIN;
		num_dwords -= GUC_KLV_LEN_MIN;

		if (num_dwords < len)
			err = -EBADMSG;
		else
			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);

		if (err) {
			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
			return err;
		}

		klvs += len;
		num_dwords -= len;
	}

	return pf_validate_vf_config(gt, vfid);
}

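/*
 * The parser above walks raw GuC KLV records: each record starts with one
 * header dword holding the key and the length (in dwords) of the value that
 * follows. For example, a LMEM size KLV encoding 4 GiB could be built like
 * this (sketch only, using the GUC_KLV_0_KEY/GUC_KLV_0_LEN fields consumed
 * above):
 *
 *	u32 klv[] = {
 *		FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_LMEM_SIZE_KEY) |
 *		FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_LMEM_SIZE_LEN),
 *		lower_32_bits(SZ_4G),	// value[0]
 *		upper_32_bits(SZ_4G),	// value[1]
 *	};
 */
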
/**
 * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
 * @gt: the &xe_gt
 * @vfid: the VF identifier (can't be PF)
 * @buf: the buffer with config data
 * @size: the size of the config data
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
				  const void *buf, size_t size)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid);

	if (!size)
		return -ENODATA;

	if (size % sizeof(u32))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		struct drm_printer p = xe_gt_info_printer(gt);

		drm_printf(&p, "restoring VF%u config:\n", vfid);
		xe_guc_klv_print(buf, size / sizeof(u32), &p);
	}

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_send_vf_cfg_reset(gt, vfid);
	if (!err) {
		pf_release_vf_config(gt, vfid);
		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
	}
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}

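/*
 * Example (illustrative sketch): replaying a blob captured with
 * xe_gt_sriov_pf_config_save() above:
 *
 *	err = xe_gt_sriov_pf_config_restore(gt, 1, blob, saved);
 *	// on error the previous config has already been released;
 *	// VF1 must be re-provisioned before it can be enabled again
 */
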
static void fini_config(void *arg)
{
	struct xe_gt *gt = arg;
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	for (n = 1; n <= total_vfs; n++)
		pf_release_vf_config(gt, n);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
}

/**
 * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
}

/**
 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
 * @gt: the &xe_gt
 *
 * Any prior configurations pushed to GuC are lost when the GT is reset.
 * Push again all non-empty VF configurations to the GuC.
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	unsigned int fail = 0, skip = 0;

	for (n = 1; n <= total_vfs; n++) {
		if (xe_gt_sriov_pf_config_is_empty(gt, n))
			skip++;
		else if (xe_gt_sriov_pf_config_push(gt, n, false))
			fail++;
	}

	if (fail)
		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
				   fail, total_vfs - skip, str_plural(total_vfs));

	if (fail != total_vfs)
		xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
}

/**
 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT configuration data for all VFs.
 * VFs without provisioned GGTT are ignored.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!xe_ggtt_node_allocated(config->ggtt_region))
			continue;

		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
			   n, config->ggtt_region->base.start,
			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
			   buf);
	}

	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC context ID allocations across all VFs.
 * VFs without GuC context IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_ctxs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_ctx,
			   config->begin_ctx + config->num_ctxs - 1,
			   config->num_ctxs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GuC doorbell IDs allocations across all VFs.
 * VFs without GuC doorbell IDs are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->num_dbs)
			continue;

		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
			   n,
			   config->begin_db,
			   config->begin_db + config->num_dbs - 1,
			   config->num_dbs);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print LMEM allocations across all VFs.
 * VFs without LMEM allocation are skipped.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
{
	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
	const struct xe_gt_sriov_config *config;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	for (n = 1; n <= total_vfs; n++) {
		config = &gt->sriov.pf.vfs[n].config;
		if (!config->lmem_obj)
			continue;

		string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
				buf, sizeof(buf));
		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
			   n, config->lmem_obj->size, buf);
	}

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
	return 0;
}

/**
 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * Print GGTT ranges that are available for the provisioning.
 *
 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
{
	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
	u64 alignment = pf_get_ggtt_alignment(gt);
	u64 spare, avail, total;
	char buf[10];

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));

	spare = pf_get_spare_ggtt(gt);
	total = xe_ggtt_print_holes(ggtt, alignment, p);

	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);

	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);

	avail = total > spare ? total - spare : 0;

	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);

	return 0;
}

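/*
 * Example (illustrative sketch): the print helpers above are meant to be
 * wired into debugfs via a &drm_printer; the show callback and its private
 * data wiring below are assumed for illustration:
 *
 *	static int ggtt_provisioning_show(struct seq_file *m, void *data)
 *	{
 *		struct xe_gt *gt = m->private;	// assumed wiring
 *		struct drm_printer p = drm_seq_file_printer(m);
 *
 *		return xe_gt_sriov_pf_config_print_ggtt(gt, &p);
 *	}
 */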