xref: /linux/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023-2024 Intel Corporation
4  */
5 
6 #include <linux/string_choices.h>
7 #include <linux/wordpart.h>
8 
9 #include "abi/guc_actions_sriov_abi.h"
10 #include "abi/guc_klvs_abi.h"
11 
12 #include "regs/xe_guc_regs.h"
13 
14 #include "xe_bo.h"
15 #include "xe_device.h"
16 #include "xe_ggtt.h"
17 #include "xe_gt.h"
18 #include "xe_gt_sriov_pf_config.h"
19 #include "xe_gt_sriov_pf_helpers.h"
20 #include "xe_gt_sriov_pf_policy.h"
21 #include "xe_gt_sriov_printk.h"
22 #include "xe_guc.h"
23 #include "xe_guc_buf.h"
24 #include "xe_guc_ct.h"
25 #include "xe_guc_db_mgr.h"
26 #include "xe_guc_fwif.h"
27 #include "xe_guc_id_mgr.h"
28 #include "xe_guc_klv_helpers.h"
29 #include "xe_guc_klv_thresholds_set.h"
30 #include "xe_guc_submit.h"
31 #include "xe_lmtt.h"
32 #include "xe_map.h"
33 #include "xe_migrate.h"
34 #include "xe_sriov.h"
35 #include "xe_ttm_vram_mgr.h"
36 #include "xe_wopcm.h"
37 
38 #define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
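
/*
 * Editorial note (not part of the original file): the macro above packs two
 * 32-bit halves into a single u64, low dword in bits 31:0 and high dword in
 * bits 63:32, e.g. make_u64_from_u32(0x1, 0x2) == 0x100000002ULL.
 */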
39 
40 /*
41  * Return: number of KLVs that were successfully parsed and saved,
42  *         negative error code on failure.
43  */
44 static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
45 				    u64 addr, u32 size)
46 {
47 	u32 request[] = {
48 		GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
49 		vfid,
50 		lower_32_bits(addr),
51 		upper_32_bits(addr),
52 		size,
53 	};
54 
55 	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
56 }
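
/*
 * Illustration (editorial): the request above is a fixed 5-dword H2G
 * message; dword 0 carries the GUC_ACTION_PF2GUC_UPDATE_VF_CFG action,
 * dword 1 the VF number, dwords 2-3 the GuC-visible address of the config
 * buffer split into lower/upper halves, and dword 4 the buffer size in
 * dwords. A reset request (as used below) passes addr = 0 and size = 0.
 */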
57 
58 /*
59  * Return: 0 on success, negative error code on failure.
60  */
61 static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
62 {
63 	struct xe_guc *guc = &gt->uc.guc;
64 	int ret;
65 
66 	ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
67 
68 	return ret <= 0 ? ret : -EPROTO;
69 }
70 
71 /*
72  * Return: number of KLVs that were successfully parsed and saved,
73  *         negative error code on failure.
74  */
75 static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
76 {
77 	struct xe_guc *guc = &gt->uc.guc;
78 
79 	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
80 }
81 
82 /*
83  * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
84  *         negative error code on failure.
85  */
86 static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
87 			       struct xe_guc_buf buf, u32 num_dwords)
88 {
89 	int ret;
90 
91 	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);
92 
93 	if (ret != num_klvs) {
94 		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
95 		void *klvs = xe_guc_buf_cpu_ptr(buf);
96 		struct drm_printer p = xe_gt_info_printer(gt);
97 		char name[8];
98 
99 		xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
100 				   xe_sriov_function_name(vfid, name, sizeof(name)),
101 				   num_klvs, str_plural(num_klvs), ERR_PTR(err));
102 		xe_guc_klv_print(klvs, num_dwords, &p);
103 		return err;
104 	}
105 
106 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
107 		struct drm_printer p = xe_gt_dbg_printer(gt);
108 		void *klvs = xe_guc_buf_cpu_ptr(buf);
109 		char name[8];
110 
111 		xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n",
112 				xe_sriov_function_name(vfid, name, sizeof(name)),
113 				num_klvs, str_plural(num_klvs));
114 		xe_guc_klv_print(klvs, num_dwords, &p);
115 	}
116 
117 	return 0;
118 }
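
/*
 * Editorial summary of the error mapping above: GuC replies with the
 * number of KLVs it accepted, so
 *
 *	ret < 0          ->  ret     (transport/GuC error, forwarded as-is)
 *	ret < num_klvs   ->  -ENOKEY (some KLVs were not updated)
 *	ret > num_klvs   ->  -EPROTO (malformed reply)
 *	ret == num_klvs  ->  0       (success)
 */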
119 
120 /*
121  * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
122  *         negative error code on failure.
123  */
124 static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
125 			       const u32 *klvs, u32 num_dwords)
126 {
127 	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
128 
129 	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
130 
131 	if (!xe_guc_buf_is_valid(buf))
132 		return -ENOBUFS;
133 
134 	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
135 }
136 
137 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
138 {
139 	u32 klv[] = {
140 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
141 		value,
142 	};
143 
144 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
145 }
146 
147 static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
148 {
149 	u32 klv[] = {
150 		FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
151 		lower_32_bits(value),
152 		upper_32_bits(value),
153 	};
154 
155 	return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
156 }
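
/*
 * Editorial illustration of the KLV layout built by the two helpers above
 * (the key value is hypothetical): a 64-bit KLV occupies three dwords - one
 * header dword encoding the key and a length of 2, followed by the value,
 * lower half first:
 *
 *	[0] FIELD_PREP(GUC_KLV_0_KEY, 0x1234) | FIELD_PREP(GUC_KLV_0_LEN, 2)
 *	[1] lower_32_bits(value)
 *	[2] upper_32_bits(value)
 */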
157 
158 static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
159 {
160 	u32 klvs[] = {
161 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
162 		lower_32_bits(start),
163 		upper_32_bits(start),
164 		PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
165 		lower_32_bits(size),
166 		upper_32_bits(size),
167 	};
168 
169 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
170 }
171 
172 static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
173 {
174 	u32 klvs[] = {
175 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
176 		begin,
177 		PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
178 		num,
179 	};
180 
181 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
182 }
183 
184 static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
185 {
186 	u32 klvs[] = {
187 		PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
188 		begin,
189 		PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
190 		num,
191 	};
192 
193 	return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
194 }
195 
196 static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
197 {
198 	/* GuC will silently clamp values exceeding max */
199 	*exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);
200 
201 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
202 }
203 
204 static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
205 {
206 	/* GuC will silently clamp values exceeding max */
207 	*preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);
208 
209 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
210 }
211 
212 static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
213 {
214 	return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
215 }
216 
217 static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
218 {
219 	return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
220 }
221 
222 static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
223 				    enum xe_guc_klv_threshold_index index, u32 value)
224 {
225 	u32 key = xe_guc_klv_threshold_index_to_key(index);
226 
227 	xe_gt_assert(gt, key);
228 	return pf_push_vf_cfg_u32(gt, vfid, key, value);
229 }
230 
231 static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
232 {
233 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
234 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
235 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
236 
237 	return &gt->sriov.pf.vfs[vfid].config;
238 }
239 
240 /* Return: number of configuration dwords written */
241 static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details)
242 {
243 	u32 n = 0;
244 
245 	if (details) {
246 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
247 		cfg[n++] = lower_32_bits(start);
248 		cfg[n++] = upper_32_bits(start);
249 	}
250 
251 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
252 	cfg[n++] = lower_32_bits(size);
253 	cfg[n++] = upper_32_bits(size);
254 
255 	return n;
256 }
257 
258 /* Return: number of configuration dwords written */
259 static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
260 {
261 	struct xe_ggtt_node *node = config->ggtt_region;
262 
263 	if (!xe_ggtt_node_allocated(node))
264 		return 0;
265 
266 	return encode_ggtt(cfg, node->base.start, node->base.size, details);
267 }
268 
269 /* Return: number of configuration dwords written */
270 static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
271 {
272 	u32 n = 0;
273 
274 	n += encode_config_ggtt(cfg, config, details);
275 
276 	if (details && config->num_ctxs) {
277 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
278 		cfg[n++] = config->begin_ctx;
279 	}
280 
281 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
282 	cfg[n++] = config->num_ctxs;
283 
284 	if (details && config->num_dbs) {
285 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
286 		cfg[n++] = config->begin_db;
287 	}
288 
289 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
290 	cfg[n++] = config->num_dbs;
291 
292 	if (config->lmem_obj) {
293 		cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
294 		cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj));
295 		cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj));
296 	}
297 
298 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
299 	cfg[n++] = config->exec_quantum;
300 
301 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
302 	cfg[n++] = config->preempt_timeout;
303 
304 #define encode_threshold_config(TAG, ...) ({					\
305 	cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG);			\
306 	cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)];	\
307 });
308 
309 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
310 #undef encode_threshold_config
311 
312 	return n;
313 }
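
/*
 * Editorial example (all values hypothetical): with details=true, a VF
 * config with a GGTT region at 0x10000000 of size 0x40000000, 32 contexts
 * from ID 64 and 4 doorbells from ID 8 would be encoded as:
 *
 *	KLV(GGTT_START, len=2):  0x10000000, 0x0
 *	KLV(GGTT_SIZE, len=2):   0x40000000, 0x0
 *	KLV(BEGIN_CONTEXT_ID):   64
 *	KLV(NUM_CONTEXTS):       32
 *	KLV(BEGIN_DOORBELL_ID):  8
 *	KLV(NUM_DOORBELLS):      4
 *	...followed by the LMEM size (if any), the scheduling KLVs and the
 *	threshold KLVs.
 */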
314 
315 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
316 {
317 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
318 	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
319 	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
320 	u32 num_dwords;
321 	int num_klvs;
322 	u32 *cfg;
323 	int err;
324 
325 	if (!xe_guc_buf_is_valid(buf))
326 		return -ENOBUFS;
327 
328 	cfg = xe_guc_buf_cpu_ptr(buf);
329 	num_dwords = encode_config(cfg, config, true);
330 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
331 
332 	if (xe_gt_is_media_type(gt)) {
333 		struct xe_gt *primary = gt->tile->primary_gt;
334 		struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
335 
336 		/* media-GT will never include a GGTT config */
337 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
338 
339 		/* the GGTT config must be taken from the primary-GT instead */
340 		num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
341 	}
342 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
343 
344 	if (vfid == PFID) {
345 		u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt));
346 		u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start;
347 
348 		/* plain PF config data will never include a real GGTT region */
349 		xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
350 
351 		/* fake PF GGTT config covers full GGTT range except reserved WOPCM */
352 		num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true);
353 	}
354 
355 	num_klvs = xe_guc_klv_count(cfg, num_dwords);
356 	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
357 
358 	return err;
359 }
360 
361 static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
362 {
363 	int err = 0;
364 
365 	xe_gt_assert(gt, vfid);
366 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
367 
368 	if (reset)
369 		err = pf_send_vf_cfg_reset(gt, vfid);
370 	if (!err)
371 		err = pf_push_full_vf_config(gt, vfid);
372 
373 	return err;
374 }
375 
376 static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
377 {
378 	return pf_push_vf_cfg(gt, vfid, true);
379 }
380 
381 static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
382 {
383 	struct xe_device *xe = gt_to_xe(gt);
384 
385 	return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
386 }
387 
388 static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
389 {
390 	/* XXX: preliminary */
391 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
392 		pf_get_ggtt_alignment(gt) : SZ_64M;
393 }
394 
395 static u64 pf_get_spare_ggtt(struct xe_gt *gt)
396 {
397 	u64 spare;
398 
399 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
400 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
401 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
402 
403 	spare = gt->sriov.pf.spare.ggtt_size;
404 	spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
405 
406 	return spare;
407 }
408 
409 static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
410 {
411 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
412 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
413 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
414 
415 	if (size && size < pf_get_min_spare_ggtt(gt))
416 		return -EINVAL;
417 
418 	size = round_up(size, pf_get_ggtt_alignment(gt));
419 	gt->sriov.pf.spare.ggtt_size = size;
420 
421 	return 0;
422 }
423 
424 static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
425 {
426 	int err, err2 = 0;
427 
428 	err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
429 
430 	if (tile->media_gt && !err)
431 		err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
432 
433 	return err ?: err2;
434 }
435 
436 static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
437 {
438 	if (xe_ggtt_node_allocated(node)) {
439 		/*
440 		 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
441 		 * is redundant, as PTE will be implicitly re-assigned to PF by
442 		 * the xe_ggtt_clear() called by below xe_ggtt_remove_node().
443 		 */
444 		xe_ggtt_node_remove(node, false);
445 	} else {
446 		xe_ggtt_node_fini(node);
447 	}
448 }
449 
450 static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
451 {
452 	pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
453 	config->ggtt_region = NULL;
454 }
455 
456 static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
457 {
458 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
459 	struct xe_ggtt_node *node;
460 	struct xe_tile *tile = gt_to_tile(gt);
461 	struct xe_ggtt *ggtt = tile->mem.ggtt;
462 	u64 alignment = pf_get_ggtt_alignment(gt);
463 	int err;
464 
465 	xe_gt_assert(gt, vfid);
466 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
467 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
468 
469 	size = round_up(size, alignment);
470 
471 	if (xe_ggtt_node_allocated(config->ggtt_region)) {
472 		err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
473 		if (unlikely(err))
474 			return err;
475 
476 		pf_release_vf_config_ggtt(gt, config);
477 
478 		err = pf_refresh_vf_cfg(gt, vfid);
479 		if (unlikely(err))
480 			return err;
481 	}
482 	xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
483 
484 	if (!size)
485 		return 0;
486 
487 	node = xe_ggtt_node_init(ggtt);
488 	if (IS_ERR(node))
489 		return PTR_ERR(node);
490 
491 	err = xe_ggtt_node_insert(node, size, alignment);
492 	if (unlikely(err))
493 		goto err;
494 
495 	xe_ggtt_assign(node, vfid);
496 	xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
497 				vfid, node->base.start, node->base.start + node->base.size - 1);
498 
499 	err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
500 	if (unlikely(err))
501 		goto err;
502 
503 	config->ggtt_region = node;
504 	return 0;
505 err:
506 	pf_release_ggtt(tile, node);
507 	return err;
508 }
509 
510 static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
511 {
512 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
513 	struct xe_ggtt_node *node = config->ggtt_region;
514 
515 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
516 	return xe_ggtt_node_allocated(node) ? node->base.size : 0;
517 }
518 
519 /**
520  * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
521  * @gt: the &xe_gt
522  * @vfid: the VF identifier
523  *
524  * This function can only be called on PF.
525  *
526  * Return: size of the VF's assigned (or PF's spare) GGTT address space.
527  */
528 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
529 {
530 	u64 size;
531 
532 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
533 	if (vfid)
534 		size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
535 	else
536 		size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
537 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
538 
539 	return size;
540 }
541 
542 static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
543 				  u64 actual, const char *what, int err)
544 {
545 	char size[10];
546 	char name[8];
547 
548 	xe_sriov_function_name(vfid, name, sizeof(name));
549 
550 	if (unlikely(err)) {
551 		string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
552 		xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
553 				   name, value, size, what, ERR_PTR(err));
554 		string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
555 		xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
556 				 name, actual, size, what);
557 		return err;
558 	}
559 
560 	/* the actual value may have changed during provisioning */
561 	string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
562 	xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
563 			 name, actual, size, what);
564 	return 0;
565 }
566 
567 /**
568  * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
569  * @gt: the &xe_gt (can't be media)
570  * @vfid: the VF identifier
571  * @size: requested GGTT size
572  *
573  * If &vfid represents PF, then the function will change the PF's spare GGTT config.
574  *
575  * This function can only be called on PF.
576  *
577  * Return: 0 on success or a negative error code on failure.
578  */
579 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
580 {
581 	int err;
582 
583 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
584 
585 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
586 	if (vfid)
587 		err = pf_provision_vf_ggtt(gt, vfid, size);
588 	else
589 		err = pf_set_spare_ggtt(gt, size);
590 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
591 
592 	return pf_config_set_u64_done(gt, vfid, size,
593 				      xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
594 				      vfid ? "GGTT" : "spare GGTT", err);
595 }
596 
597 static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
598 				       u64 value, u64 (*get)(struct xe_gt*, unsigned int),
599 				       const char *what, unsigned int last, int err)
600 {
601 	char size[10];
602 
603 	xe_gt_assert(gt, first);
604 	xe_gt_assert(gt, num_vfs);
605 	xe_gt_assert(gt, first <= last);
606 
607 	if (num_vfs == 1)
608 		return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
609 
610 	if (unlikely(err)) {
611 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
612 				   first, first + num_vfs - 1, what);
613 		if (last > first)
614 			pf_config_bulk_set_u64_done(gt, first, last - first, value,
615 						    get, what, last, 0);
616 		return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
617 	}
618 
619 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
620 	value = get(gt, first);
621 	string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
622 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
623 			 first, first + num_vfs - 1, value, size, what);
624 	return 0;
625 }
626 
627 /**
628  * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
629  * @gt: the &xe_gt (can't be media)
630  * @vfid: starting VF identifier (can't be 0)
631  * @num_vfs: number of VFs to provision
632  * @size: requested GGTT size
633  *
634  * This function can only be called on PF.
635  *
636  * Return: 0 on success or a negative error code on failure.
637  */
638 int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
639 					unsigned int num_vfs, u64 size)
640 {
641 	unsigned int n;
642 	int err = 0;
643 
644 	xe_gt_assert(gt, vfid);
645 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
646 
647 	if (!num_vfs)
648 		return 0;
649 
650 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
651 	for (n = vfid; n < vfid + num_vfs; n++) {
652 		err = pf_provision_vf_ggtt(gt, n, size);
653 		if (err)
654 			break;
655 	}
656 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
657 
658 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
659 					   xe_gt_sriov_pf_config_get_ggtt,
660 					   "GGTT", n, err);
661 }
662 
663 /* Return: size of the largest contiguous GGTT region */
664 static u64 pf_get_max_ggtt(struct xe_gt *gt)
665 {
666 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
667 	u64 alignment = pf_get_ggtt_alignment(gt);
668 	u64 spare = pf_get_spare_ggtt(gt);
669 	u64 max_hole;
670 
671 	max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
672 
673 	xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
674 				max_hole / SZ_1K, spare / SZ_1K);
675 	return max_hole > spare ? max_hole - spare : 0;
676 }
677 
678 static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
679 {
680 	u64 available = pf_get_max_ggtt(gt);
681 	u64 alignment = pf_get_ggtt_alignment(gt);
682 	u64 fair;
683 
684 	/*
685 	 * To simplify the logic we only look at single largest GGTT region
686 	 * as that will be always the best fit for 1 VF case, and most likely
687 	 * will also nicely cover other cases where VFs are provisioned on the
688 	 * fresh and idle PF driver, without any stale GGTT allocations spread
689 	 * in the middle of the full GGTT range.
690 	 */
691 
692 	fair = div_u64(available, num_vfs);
693 	fair = ALIGN_DOWN(fair, alignment);
694 	xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
695 				available / SZ_1K, num_vfs, fair / SZ_1K);
696 	return fair;
697 }
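
/*
 * Worked example (editorial, numbers hypothetical): with a largest usable
 * hole of 4032M left after subtracting the spare, 7 VFs and a 64K
 * alignment, fair = ALIGN_DOWN(4032M / 7, 64K) = 576M per VF.
 */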
698 
699 /**
700  * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
701  * @gt: the &xe_gt (can't be media)
702  * @vfid: starting VF identifier (can't be 0)
703  * @num_vfs: number of VFs to provision
704  *
705  * This function can only be called on PF.
706  *
707  * Return: 0 on success or a negative error code on failure.
708  */
709 int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
710 					unsigned int num_vfs)
711 {
712 	u64 fair;
713 
714 	xe_gt_assert(gt, vfid);
715 	xe_gt_assert(gt, num_vfs);
716 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
717 
718 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
719 	fair = pf_estimate_fair_ggtt(gt, num_vfs);
720 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
721 
722 	if (!fair)
723 		return -ENOSPC;
724 
725 	return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
726 }
727 
728 static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
729 {
730 	/* XXX: preliminary */
731 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
732 		hweight64(gt->info.engine_mask) : SZ_256;
733 }
734 
735 static u32 pf_get_spare_ctxs(struct xe_gt *gt)
736 {
737 	u32 spare;
738 
739 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
740 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
741 
742 	spare = gt->sriov.pf.spare.num_ctxs;
743 	spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
744 
745 	return spare;
746 }
747 
748 static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
749 {
750 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
751 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
752 
753 	if (spare > GUC_ID_MAX)
754 		return -EINVAL;
755 
756 	if (spare && spare < pf_get_min_spare_ctxs(gt))
757 		return -EINVAL;
758 
759 	gt->sriov.pf.spare.num_ctxs = spare;
760 
761 	return 0;
762 }
763 
764 /* Return: start ID or negative error code on failure */
765 static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
766 {
767 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
768 	unsigned int spare = pf_get_spare_ctxs(gt);
769 
770 	return xe_guc_id_mgr_reserve(idm, num, spare);
771 }
772 
773 static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
774 {
775 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
776 
777 	if (num)
778 		xe_guc_id_mgr_release(idm, start, num);
779 }
780 
781 static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
782 {
783 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
784 
785 	pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
786 	config->begin_ctx = 0;
787 	config->num_ctxs = 0;
788 }
789 
790 static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
791 {
792 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
793 	int ret;
794 
795 	xe_gt_assert(gt, vfid);
796 
797 	if (num_ctxs > GUC_ID_MAX)
798 		return -EINVAL;
799 
800 	if (config->num_ctxs) {
801 		ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
802 		if (unlikely(ret))
803 			return ret;
804 
805 		pf_release_config_ctxs(gt, config);
806 
807 		ret = pf_refresh_vf_cfg(gt, vfid);
808 		if (unlikely(ret))
809 			return ret;
810 	}
811 
812 	if (!num_ctxs)
813 		return 0;
814 
815 	ret = pf_reserve_ctxs(gt, num_ctxs);
816 	if (unlikely(ret < 0))
817 		return ret;
818 
819 	config->begin_ctx = ret;
820 	config->num_ctxs = num_ctxs;
821 
822 	ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
823 	if (unlikely(ret)) {
824 		pf_release_config_ctxs(gt, config);
825 		return ret;
826 	}
827 
828 	xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
829 				vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
830 	return 0;
831 }
832 
833 static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
834 {
835 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
836 
837 	return config->num_ctxs;
838 }
839 
840 /**
841  * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
842  * @gt: the &xe_gt
843  * @vfid: the VF identifier
844  *
845  * This function can only be called on PF.
846  * If &vfid represents the PF, then the number of the PF's spare GuC context IDs is returned.
847  *
848  * Return: VF's quota (or PF's spare).
849  */
850 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
851 {
852 	u32 num_ctxs;
853 
854 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
855 	if (vfid)
856 		num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
857 	else
858 		num_ctxs = pf_get_spare_ctxs(gt);
859 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
860 
861 	return num_ctxs;
862 }
863 
864 static const char *no_unit(u32 unused)
865 {
866 	return "";
867 }
868 
869 static const char *spare_unit(u32 unused)
870 {
871 	return " spare";
872 }
873 
874 static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
875 				  const char *what, const char *(*unit)(u32), int err)
876 {
877 	char name[8];
878 
879 	xe_sriov_function_name(vfid, name, sizeof(name));
880 
881 	if (unlikely(err)) {
882 		xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
883 				   name, value, unit(value), what, ERR_PTR(err));
884 		xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
885 				 name, actual, unit(actual), what);
886 		return err;
887 	}
888 
889 	/* the actual value may have changed during provisioning */
890 	xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
891 			 name, actual, unit(actual), what);
892 	return 0;
893 }
894 
895 /**
896  * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
897  * @gt: the &xe_gt
898  * @vfid: the VF identifier
899  * @num_ctxs: requested number of GuC context IDs (0 to release)
900  *
901  * This function can only be called on PF.
902  *
903  * Return: 0 on success or a negative error code on failure.
904  */
905 int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
906 {
907 	int err;
908 
909 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
910 	if (vfid)
911 		err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
912 	else
913 		err = pf_set_spare_ctxs(gt, num_ctxs);
914 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
915 
916 	return pf_config_set_u32_done(gt, vfid, num_ctxs,
917 				      xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
918 				      "GuC context IDs", vfid ? no_unit : spare_unit, err);
919 }
920 
921 static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
922 				       u32 value, u32 (*get)(struct xe_gt*, unsigned int),
923 				       const char *what, const char *(*unit)(u32),
924 				       unsigned int last, int err)
925 {
926 	xe_gt_assert(gt, first);
927 	xe_gt_assert(gt, num_vfs);
928 	xe_gt_assert(gt, first <= last);
929 
930 	if (num_vfs == 1)
931 		return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
932 
933 	if (unlikely(err)) {
934 		xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
935 				   first, first + num_vfs - 1, what);
936 		if (last > first)
937 			pf_config_bulk_set_u32_done(gt, first, last - first, value,
938 						    get, what, unit, last, 0);
939 		return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
940 	}
941 
942 	/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
943 	value = get(gt, first);
944 	xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
945 			 first, first + num_vfs - 1, value, unit(value), what);
946 	return 0;
947 }
948 
949 /**
950  * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
951  * @gt: the &xe_gt
952  * @vfid: starting VF identifier
953  * @num_vfs: number of VFs to provision
954  * @num_ctxs: requested number of GuC context IDs (0 to release)
955  *
956  * This function can only be called on PF.
957  *
958  * Return: 0 on success or a negative error code on failure.
959  */
960 int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
961 					unsigned int num_vfs, u32 num_ctxs)
962 {
963 	unsigned int n;
964 	int err = 0;
965 
966 	xe_gt_assert(gt, vfid);
967 
968 	if (!num_vfs)
969 		return 0;
970 
971 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
972 	for (n = vfid; n < vfid + num_vfs; n++) {
973 		err = pf_provision_vf_ctxs(gt, n, num_ctxs);
974 		if (err)
975 			break;
976 	}
977 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
978 
979 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
980 					   xe_gt_sriov_pf_config_get_ctxs,
981 					   "GuC context IDs", no_unit, n, err);
982 }
983 
984 static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
985 {
986 	struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
987 	u32 spare = pf_get_spare_ctxs(gt);
988 	u32 fair = (idm->total - spare) / num_vfs;
989 	int ret;
990 
991 	for (; fair; --fair) {
992 		ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
993 		if (ret < 0)
994 			continue;
995 		xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
996 		break;
997 	}
998 
999 	xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
1000 	return fair;
1001 }
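
/*
 * Editorial note on the loop above: a trial reservation of fair * num_vfs
 * IDs is the only reliable way to discover what the ID manager can really
 * hand out (the free space may be fragmented), so the estimate starts at
 * (total - spare) / num_vfs and backs off one ID per VF at a time until a
 * reservation succeeds, then immediately releases it.
 */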
1002 
1003 /**
1004  * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
1005  * @gt: the &xe_gt
1006  * @vfid: starting VF identifier (can't be 0)
1007  * @num_vfs: number of VFs to provision (can't be 0)
1008  *
1009  * This function can only be called on PF.
1010  *
1011  * Return: 0 on success or a negative error code on failure.
1012  */
1013 int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
1014 					unsigned int num_vfs)
1015 {
1016 	u32 fair;
1017 
1018 	xe_gt_assert(gt, vfid);
1019 	xe_gt_assert(gt, num_vfs);
1020 
1021 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1022 	fair = pf_estimate_fair_ctxs(gt, num_vfs);
1023 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1024 
1025 	if (!fair)
1026 		return -ENOSPC;
1027 
1028 	return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
1029 }
1030 
1031 static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
1032 {
1033 	/* XXX: preliminary, we don't use doorbells yet! */
1034 	return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
1035 }
1036 
1037 static u32 pf_get_spare_dbs(struct xe_gt *gt)
1038 {
1039 	u32 spare;
1040 
1041 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1042 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1043 
1044 	spare = gt->sriov.pf.spare.num_dbs;
1045 	spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
1046 
1047 	return spare;
1048 }
1049 
1050 static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
1051 {
1052 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1053 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1054 
1055 	if (spare > GUC_NUM_DOORBELLS)
1056 		return -EINVAL;
1057 
1058 	if (spare && spare < pf_get_min_spare_dbs(gt))
1059 		return -EINVAL;
1060 
1061 	gt->sriov.pf.spare.num_dbs = spare;
1062 	return 0;
1063 }
1064 
1065 /* Return: start ID or negative error code on failure */
1066 static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
1067 {
1068 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1069 	unsigned int spare = pf_get_spare_dbs(gt);
1070 
1071 	return xe_guc_db_mgr_reserve_range(dbm, num, spare);
1072 }
1073 
1074 static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
1075 {
1076 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1077 
1078 	if (num)
1079 		xe_guc_db_mgr_release_range(dbm, start, num);
1080 }
1081 
1082 static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1083 {
1084 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1085 
1086 	pf_release_dbs(gt, config->begin_db, config->num_dbs);
1087 	config->begin_db = 0;
1088 	config->num_dbs = 0;
1089 }
1090 
1091 static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1092 {
1093 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1094 	int ret;
1095 
1096 	xe_gt_assert(gt, vfid);
1097 
1098 	if (num_dbs > GUC_NUM_DOORBELLS)
1099 		return -EINVAL;
1100 
1101 	if (config->num_dbs) {
1102 		ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
1103 		if (unlikely(ret))
1104 			return ret;
1105 
1106 		pf_release_config_dbs(gt, config);
1107 
1108 		ret = pf_refresh_vf_cfg(gt, vfid);
1109 		if (unlikely(ret))
1110 			return ret;
1111 	}
1112 
1113 	if (!num_dbs)
1114 		return 0;
1115 
1116 	ret = pf_reserve_dbs(gt, num_dbs);
1117 	if (unlikely(ret < 0))
1118 		return ret;
1119 
1120 	config->begin_db = ret;
1121 	config->num_dbs = num_dbs;
1122 
1123 	ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
1124 	if (unlikely(ret)) {
1125 		pf_release_config_dbs(gt, config);
1126 		return ret;
1127 	}
1128 
1129 	xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
1130 				vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
1131 	return 0;
1132 }
1133 
1134 static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
1135 {
1136 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1137 
1138 	return config->num_dbs;
1139 }
1140 
1141 /**
1142  * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
1143  * @gt: the &xe_gt
1144  * @vfid: the VF identifier
1145  *
1146  * This function can only be called on PF.
1147  * If &vfid represents the PF, then the number of the PF's spare GuC doorbell IDs is returned.
1148  *
1149  * Return: VF's quota (or PF's spare).
1150  */
1151 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
1152 {
1153 	u32 num_dbs;
1154 
1155 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1156 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1157 
1158 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1159 	if (vfid)
1160 		num_dbs = pf_get_vf_config_dbs(gt, vfid);
1161 	else
1162 		num_dbs = pf_get_spare_dbs(gt);
1163 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1164 
1165 	return num_dbs;
1166 }
1167 
1168 /**
1169  * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
1170  * @gt: the &xe_gt
1171  * @vfid: the VF identifier
1172  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1173  *
1174  * This function can only be called on PF.
1175  *
1176  * Return: 0 on success or a negative error code on failure.
1177  */
1178 int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1179 {
1180 	int err;
1181 
1182 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1183 	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1184 
1185 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1186 	if (vfid)
1187 		err = pf_provision_vf_dbs(gt, vfid, num_dbs);
1188 	else
1189 		err = pf_set_spare_dbs(gt, num_dbs);
1190 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1191 
1192 	return pf_config_set_u32_done(gt, vfid, num_dbs,
1193 				      xe_gt_sriov_pf_config_get_dbs(gt, vfid),
1194 				      "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
1195 }
1196 
1197 /**
1198  * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
1199  * @gt: the &xe_gt
1200  * @vfid: starting VF identifier (can't be 0)
1201  * @num_vfs: number of VFs to provision
1202  * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1203  *
1204  * This function can only be called on PF.
1205  *
1206  * Return: 0 on success or a negative error code on failure.
1207  */
1208 int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
1209 				       unsigned int num_vfs, u32 num_dbs)
1210 {
1211 	unsigned int n;
1212 	int err = 0;
1213 
1214 	xe_gt_assert(gt, vfid);
1215 
1216 	if (!num_vfs)
1217 		return 0;
1218 
1219 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1220 	for (n = vfid; n < vfid + num_vfs; n++) {
1221 		err = pf_provision_vf_dbs(gt, n, num_dbs);
1222 		if (err)
1223 			break;
1224 	}
1225 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1226 
1227 	return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
1228 					   xe_gt_sriov_pf_config_get_dbs,
1229 					   "GuC doorbell IDs", no_unit, n, err);
1230 }
1231 
1232 static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
1233 {
1234 	struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1235 	u32 spare = pf_get_spare_dbs(gt);
1236 	u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
1237 	int ret;
1238 
1239 	for (; fair; --fair) {
1240 		ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
1241 		if (ret < 0)
1242 			continue;
1243 		xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
1244 		break;
1245 	}
1246 
1247 	xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
1248 	return fair;
1249 }
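
/*
 * Editorial note: same trial-and-release probing as in
 * pf_estimate_fair_ctxs() above, but against the fixed pool of
 * GUC_NUM_DOORBELLS doorbell IDs.
 */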
1250 
1251 /**
1252  * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
1253  * @gt: the &xe_gt
1254  * @vfid: starting VF identifier (can't be 0)
1255  * @num_vfs: number of VFs to provision (can't be 0)
1256  *
1257  * This function can only be called on PF.
1258  *
1259  * Return: 0 on success or a negative error code on failure.
1260  */
1261 int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
1262 				       unsigned int num_vfs)
1263 {
1264 	u32 fair;
1265 
1266 	xe_gt_assert(gt, vfid);
1267 	xe_gt_assert(gt, num_vfs);
1268 
1269 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1270 	fair = pf_estimate_fair_dbs(gt, num_vfs);
1271 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1272 
1273 	if (!fair)
1274 		return -ENOSPC;
1275 
1276 	return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
1277 }
1278 
1279 static u64 pf_get_lmem_alignment(struct xe_gt *gt)
1280 {
1281 	/* this might be platform dependent */
1282 	return SZ_2M;
1283 }
1284 
1285 static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
1286 {
1287 	/* this might be platform dependent */
1288 	return SZ_128M; /* XXX: preliminary */
1289 }
1290 
1291 static u64 pf_get_spare_lmem(struct xe_gt *gt)
1292 {
1293 	u64 spare;
1294 
1295 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1296 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1297 
1298 	spare = gt->sriov.pf.spare.lmem_size;
1299 	spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
1300 
1301 	return spare;
1302 }
1303 
1304 static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
1305 {
1306 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1307 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1308 
1309 	if (size && size < pf_get_min_spare_lmem(gt))
1310 		return -EINVAL;
1311 
1312 	gt->sriov.pf.spare.lmem_size = size;
1313 	return 0;
1314 }
1315 
1316 static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
1317 {
1318 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1319 	struct xe_bo *bo;
1320 
1321 	bo = config->lmem_obj;
1322 	return bo ? xe_bo_size(bo) : 0;
1323 }
1324 
1325 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1326 {
1327 	struct xe_device *xe = gt_to_xe(gt);
1328 	struct xe_tile *tile;
1329 	unsigned int tid;
1330 	int err;
1331 
1332 	for_each_tile(tile, xe, tid) {
1333 		if (tile->primary_gt == gt) {
1334 			err = pf_push_vf_cfg_lmem(gt, vfid, size);
1335 		} else {
1336 			u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
1337 
1338 			if (!lmem)
1339 				continue;
1340 			err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
1341 		}
1342 		if (unlikely(err))
1343 			return err;
1344 	}
1345 	return 0;
1346 }
1347 
1348 static void pf_force_lmtt_invalidate(struct xe_device *xe)
1349 {
1350 	struct xe_lmtt *lmtt;
1351 	struct xe_tile *tile;
1352 	unsigned int tid;
1353 
1354 	xe_assert(xe, xe_device_has_lmtt(xe));
1355 	xe_assert(xe, IS_SRIOV_PF(xe));
1356 
1357 	for_each_tile(tile, xe, tid) {
1358 		lmtt = &tile->sriov.pf.lmtt;
1359 		xe_lmtt_invalidate_hw(lmtt);
1360 	}
1361 }
1362 
1363 static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1364 {
1365 	struct xe_lmtt *lmtt;
1366 	struct xe_tile *tile;
1367 	unsigned int tid;
1368 
1369 	xe_assert(xe, xe_device_has_lmtt(xe));
1370 	xe_assert(xe, IS_SRIOV_PF(xe));
1371 
1372 	for_each_tile(tile, xe, tid) {
1373 		lmtt = &tile->sriov.pf.lmtt;
1374 		xe_lmtt_drop_pages(lmtt, vfid);
1375 	}
1376 }
1377 
1378 static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1379 {
1380 	struct xe_gt_sriov_config *config;
1381 	struct xe_tile *tile;
1382 	struct xe_lmtt *lmtt;
1383 	struct xe_bo *bo;
1384 	struct xe_gt *gt;
1385 	u64 total, offset;
1386 	unsigned int gtid;
1387 	unsigned int tid;
1388 	int err;
1389 
1390 	xe_assert(xe, xe_device_has_lmtt(xe));
1391 	xe_assert(xe, IS_SRIOV_PF(xe));
1392 
1393 	total = 0;
1394 	for_each_tile(tile, xe, tid)
1395 		total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1396 
1397 	for_each_tile(tile, xe, tid) {
1398 		lmtt = &tile->sriov.pf.lmtt;
1399 
1400 		xe_lmtt_drop_pages(lmtt, vfid);
1401 		if (!total)
1402 			continue;
1403 
1404 		err = xe_lmtt_prepare_pages(lmtt, vfid, total);
1405 		if (err)
1406 			goto fail;
1407 
1408 		offset = 0;
1409 		for_each_gt(gt, xe, gtid) {
1410 			if (xe_gt_is_media_type(gt))
1411 				continue;
1412 
1413 			config = pf_pick_vf_config(gt, vfid);
1414 			bo = config->lmem_obj;
1415 			if (!bo)
1416 				continue;
1417 
1418 			err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1419 			if (err)
1420 				goto fail;
1421 			offset += xe_bo_size(bo);
1422 		}
1423 	}
1424 
1425 	pf_force_lmtt_invalidate(xe);
1426 	return 0;
1427 
1428 fail:
1429 	for_each_tile(tile, xe, tid) {
1430 		lmtt = &tile->sriov.pf.lmtt;
1431 		xe_lmtt_drop_pages(lmtt, vfid);
1432 	}
1433 	return err;
1434 }
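
/*
 * Editorial summary of pf_update_vf_lmtt() above: the VF's LMTT on every
 * tile is rebuilt from scratch - old pages are dropped, a tree sized for
 * the VF's total LMEM across all tiles is prepared, then each primary GT's
 * lmem_obj is mapped at a running offset so the VF sees one contiguous
 * space. On any failure all pages are dropped again; on success the
 * hardware TLBs are invalidated via pf_force_lmtt_invalidate().
 */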
1435 
1436 static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1437 {
1438 	xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1439 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1440 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1441 
1442 	if (config->lmem_obj) {
1443 		xe_bo_unpin_map_no_vm(config->lmem_obj);
1444 		config->lmem_obj = NULL;
1445 	}
1446 }
1447 
1448 static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1449 {
1450 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1451 	struct xe_device *xe = gt_to_xe(gt);
1452 	struct xe_tile *tile = gt_to_tile(gt);
1453 	struct xe_bo *bo;
1454 	int err;
1455 
1456 	xe_gt_assert(gt, vfid);
1457 	xe_gt_assert(gt, IS_DGFX(xe));
1458 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1459 
1460 	size = round_up(size, pf_get_lmem_alignment(gt));
1461 
1462 	if (config->lmem_obj) {
1463 		err = pf_distribute_config_lmem(gt, vfid, 0);
1464 		if (unlikely(err))
1465 			return err;
1466 
1467 		if (xe_device_has_lmtt(xe))
1468 			pf_reset_vf_lmtt(xe, vfid);
1469 		pf_release_vf_config_lmem(gt, config);
1470 	}
1471 	xe_gt_assert(gt, !config->lmem_obj);
1472 
1473 	if (!size)
1474 		return 0;
1475 
1476 	xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1477 	bo = xe_bo_create_locked(xe, tile, NULL,
1478 				 ALIGN(size, PAGE_SIZE),
1479 				 ttm_bo_type_kernel,
1480 				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1481 				 XE_BO_FLAG_NEEDS_2M |
1482 				 XE_BO_FLAG_PINNED |
1483 				 XE_BO_FLAG_PINNED_LATE_RESTORE);
1484 	if (IS_ERR(bo))
1485 		return PTR_ERR(bo);
1486 
1487 	err = xe_bo_pin(bo);
1488 	xe_bo_unlock(bo);
1489 	if (unlikely(err)) {
1490 		xe_bo_put(bo);
1491 		return err;
1492 	}
1493 
1494 	config->lmem_obj = bo;
1495 
1496 	if (xe_device_has_lmtt(xe)) {
1497 		err = pf_update_vf_lmtt(xe, vfid);
1498 		if (unlikely(err))
1499 			goto release;
1500 	}
1501 
1502 	err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo));
1503 	if (unlikely(err))
1504 		goto reset_lmtt;
1505 
1506 	xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1507 				vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M);
1508 	return 0;
1509 
1510 reset_lmtt:
1511 	if (xe_device_has_lmtt(xe))
1512 		pf_reset_vf_lmtt(xe, vfid);
1513 release:
1514 	pf_release_vf_config_lmem(gt, config);
1515 	return err;
1516 }
1517 
1518 /**
1519  * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1520  * @gt: the &xe_gt
1521  * @vfid: the VF identifier
1522  *
1523  * This function can only be called on PF.
1524  *
1525  * Return: VF's (or PF's spare) LMEM quota.
1526  */
1527 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1528 {
1529 	u64 size;
1530 
1531 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1532 	if (vfid)
1533 		size = pf_get_vf_config_lmem(gt, vfid);
1534 	else
1535 		size = pf_get_spare_lmem(gt);
1536 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1537 
1538 	return size;
1539 }
1540 
1541 /**
1542  * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1543  * @gt: the &xe_gt (can't be media)
1544  * @vfid: the VF identifier
1545  * @size: requested LMEM size
1546  *
1547  * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
1548  */
1549 int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1550 {
1551 	int err;
1552 
1553 	xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
1554 
1555 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1556 	if (vfid)
1557 		err = pf_provision_vf_lmem(gt, vfid, size);
1558 	else
1559 		err = pf_set_spare_lmem(gt, size);
1560 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1561 
1562 	return pf_config_set_u64_done(gt, vfid, size,
1563 				      xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1564 				      vfid ? "LMEM" : "spare LMEM", err);
1565 }
1566 
1567 /**
1568  * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1569  * @gt: the &xe_gt (can't be media)
1570  * @vfid: starting VF identifier (can't be 0)
1571  * @num_vfs: number of VFs to provision
1572  * @size: requested LMEM size
1573  *
1574  * This function can only be called on PF.
1575  *
1576  * Return: 0 on success or a negative error code on failure.
1577  */
1578 int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1579 					unsigned int num_vfs, u64 size)
1580 {
1581 	unsigned int n;
1582 	int err = 0;
1583 
1584 	xe_gt_assert(gt, vfid);
1585 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1586 
1587 	if (!num_vfs)
1588 		return 0;
1589 
1590 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1591 	for (n = vfid; n < vfid + num_vfs; n++) {
1592 		err = pf_provision_vf_lmem(gt, n, size);
1593 		if (err)
1594 			break;
1595 	}
1596 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1597 
1598 	return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1599 					   xe_gt_sriov_pf_config_get_lmem,
1600 					   "LMEM", n, err);
1601 }
1602 
1603 static u64 pf_query_free_lmem(struct xe_gt *gt)
1604 {
1605 	struct xe_tile *tile = gt->tile;
1606 
1607 	return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
1608 }
1609 
1610 static u64 pf_query_max_lmem(struct xe_gt *gt)
1611 {
1612 	u64 alignment = pf_get_lmem_alignment(gt);
1613 	u64 spare = pf_get_spare_lmem(gt);
1614 	u64 free = pf_query_free_lmem(gt);
1615 	u64 avail;
1616 
1617 	/* XXX: need to account for 2MB blocks only */
1618 	avail = free > spare ? free - spare : 0;
1619 	avail = round_down(avail, alignment);
1620 
1621 	return avail;
1622 }
1623 
1624 #ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1625 #define MAX_FAIR_LMEM	SZ_128M	/* XXX: make it small for the driver bringup */
1626 #endif
1627 
1628 static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1629 {
1630 	u64 available = pf_query_max_lmem(gt);
1631 	u64 alignment = pf_get_lmem_alignment(gt);
1632 	u64 fair;
1633 
1634 	fair = div_u64(available, num_vfs);
1635 	fair = rounddown_pow_of_two(fair);	/* XXX: ttm_vram_mgr & drm_buddy limitation */
1636 	fair = ALIGN_DOWN(fair, alignment);
1637 #ifdef MAX_FAIR_LMEM
1638 	fair = min_t(u64, MAX_FAIR_LMEM, fair);
1639 #endif
1640 	xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1641 				available / SZ_1M, num_vfs, fair / SZ_1M);
1642 	return fair;
1643 }
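
/*
 * Worked example (editorial, numbers hypothetical): with 14G available and
 * 3 VFs, 14G / 3 = ~4.66G, rounded down to the 4G power of two, which is
 * already 2M aligned, so each VF gets 4G - unless CONFIG_DRM_XE_DEBUG_SRIOV
 * caps it at MAX_FAIR_LMEM (128M).
 */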
1644 
1645 /**
1646  * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1647  * @gt: the &xe_gt (can't be media)
1648  * @vfid: starting VF identifier (can't be 0)
1649  * @num_vfs: number of VFs to provision (can't be 0)
1650  *
1651  * This function can only be called on PF.
1652  *
1653  * Return: 0 on success or a negative error code on failure.
1654  */
1655 int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1656 					unsigned int num_vfs)
1657 {
1658 	u64 fair;
1659 
1660 	xe_gt_assert(gt, vfid);
1661 	xe_gt_assert(gt, num_vfs);
1662 	xe_gt_assert(gt, xe_gt_is_main_type(gt));
1663 
1664 	if (!xe_device_has_lmtt(gt_to_xe(gt)))
1665 		return 0;
1666 
1667 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1668 	fair = pf_estimate_fair_lmem(gt, num_vfs);
1669 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1670 
1671 	if (!fair)
1672 		return -ENOSPC;
1673 
1674 	return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1675 }
1676 
1677 /**
1678  * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1679  * @gt: the &xe_gt
1680  * @vfid: starting VF identifier (can't be 0)
1681  * @num_vfs: number of VFs to provision (can't be 0)
1682  *
1683  * This function can only be called on PF.
1684  *
1685  * Return: 0 on success or a negative error code on failure.
1686  */
1687 int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1688 				   unsigned int num_vfs)
1689 {
1690 	int result = 0;
1691 	int err;
1692 
1693 	xe_gt_assert(gt, vfid);
1694 	xe_gt_assert(gt, num_vfs);
1695 
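	/*
	 * Attempt every resource class even if an earlier one failed;
	 * "result = result ?: err" keeps only the first error encountered.
	 */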
1696 	if (xe_gt_is_main_type(gt)) {
1697 		err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1698 		result = result ?: err;
1699 		err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1700 		result = result ?: err;
1701 	}
1702 	err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1703 	result = result ?: err;
1704 	err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1705 	result = result ?: err;
1706 
1707 	return result;
1708 }
1709 
1710 static const char *exec_quantum_unit(u32 exec_quantum)
1711 {
1712 	return exec_quantum ? "ms" : "(infinity)";
1713 }
1714 
1715 static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1716 				     u32 exec_quantum)
1717 {
1718 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1719 	int err;
1720 
1721 	err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1722 	if (unlikely(err))
1723 		return err;
1724 
1725 	config->exec_quantum = exec_quantum;
1726 	return 0;
1727 }
1728 
1729 static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1730 {
1731 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1732 
1733 	return config->exec_quantum;
1734 }
1735 
1736 /**
1737  * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1738  * @gt: the &xe_gt
1739  * @vfid: the VF identifier
1740  * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1741  *
1742  * This function can only be called on PF.
1743  *
1744  * Return: 0 on success or a negative error code on failure.
1745  */
1746 int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1747 					   u32 exec_quantum)
1748 {
1749 	int err;
1750 
1751 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1752 	err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1753 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1754 
1755 	return pf_config_set_u32_done(gt, vfid, exec_quantum,
1756 				      xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1757 				      "execution quantum", exec_quantum_unit, err);
1758 }
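/*
 * Illustrative use (hypothetical caller, e.g. a debugfs write handler;
 * not part of this file):
 *
 *	err = xe_gt_sriov_pf_config_set_exec_quantum(gt, 1, 100);
 *
 * would grant VF1 a 100 ms execution quantum, while passing 0 would
 * remove the limit (infinity).
 */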
1759 
1760 /**
1761  * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1762  * @gt: the &xe_gt
1763  * @vfid: the VF identifier
1764  *
1765  * This function can only be called on PF.
1766  *
1767  * Return: VF's (or PF's) execution quantum in milliseconds.
1768  */
1769 u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1770 {
1771 	u32 exec_quantum;
1772 
1773 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1774 	exec_quantum = pf_get_exec_quantum(gt, vfid);
1775 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1776 
1777 	return exec_quantum;
1778 }
1779 
1780 static const char *preempt_timeout_unit(u32 preempt_timeout)
1781 {
1782 	return preempt_timeout ? "us" : "(infinity)";
1783 }
1784 
1785 static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1786 					u32 preempt_timeout)
1787 {
1788 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1789 	int err;
1790 
1791 	err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1792 	if (unlikely(err))
1793 		return err;
1794 
1795 	config->preempt_timeout = preempt_timeout;
1796 
1797 	return 0;
1798 }
1799 
1800 static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1801 {
1802 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1803 
1804 	return config->preempt_timeout;
1805 }
1806 
1807 /**
1808  * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1809  * @gt: the &xe_gt
1810  * @vfid: the VF identifier
1811  * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1812  *
1813  * This function can only be called on PF.
1814  *
1815  * Return: 0 on success or a negative error code on failure.
1816  */
1817 int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1818 					      u32 preempt_timeout)
1819 {
1820 	int err;
1821 
1822 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1823 	err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1824 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1825 
1826 	return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1827 				      xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1828 				      "preemption timeout", preempt_timeout_unit, err);
1829 }
1830 
1831 /**
1832  * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1833  * @gt: the &xe_gt
1834  * @vfid: the VF identifier
1835  *
1836  * This function can only be called on PF.
1837  *
1838  * Return: VF's (or PF's) preemption timeout in microseconds.
1839  */
1840 u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1841 {
1842 	u32 preempt_timeout;
1843 
1844 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1845 	preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1846 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1847 
1848 	return preempt_timeout;
1849 }
1850 
1851 static const char *sched_priority_unit(u32 priority)
1852 {
1853 	return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
1854 		priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
1855 		priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
1856 		"(?)";
1857 }
1858 
1859 static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1860 {
1861 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1862 	int err;
1863 
1864 	err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
1865 	if (unlikely(err))
1866 		return err;
1867 
1868 	config->sched_priority = priority;
1869 	return 0;
1870 }
1871 
1872 static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1873 {
1874 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1875 
1876 	return config->sched_priority;
1877 }
1878 
1879 /**
1880  * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
1881  * @gt: the &xe_gt
1882  * @vfid: the VF identifier
1883  * @priority: requested scheduling priority
1884  *
1885  * This function can only be called on PF.
1886  *
1887  * Return: 0 on success or a negative error code on failure.
1888  */
1889 int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
1890 {
1891 	int err;
1892 
1893 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1894 	err = pf_provision_sched_priority(gt, vfid, priority);
1895 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1896 
1897 	return pf_config_set_u32_done(gt, vfid, priority,
1898 				      xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
1899 				      "scheduling priority", sched_priority_unit, err);
1900 }
1901 
1902 /**
1903  * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
1904  * @gt: the &xe_gt
1905  * @vfid: the VF identifier
1906  *
1907  * This function can only be called on PF.
1908  *
1909  * Return: VF's (or PF's) scheduling priority.
1910  */
1911 u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
1912 {
1913 	u32 priority;
1914 
1915 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1916 	priority = pf_get_sched_priority(gt, vfid);
1917 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1918 
1919 	return priority;
1920 }
1921 
1922 static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1923 {
1924 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1925 
1926 	config->exec_quantum = 0;
1927 	config->preempt_timeout = 0;
1928 }
1929 
1930 static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1931 				  enum xe_guc_klv_threshold_index index, u32 value)
1932 {
1933 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1934 	int err;
1935 
1936 	err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1937 	if (unlikely(err))
1938 		return err;
1939 
1940 	config->thresholds[index] = value;
1941 
1942 	return 0;
1943 }
1944 
1945 static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1946 			    enum xe_guc_klv_threshold_index index)
1947 {
1948 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1949 
1950 	return config->thresholds[index];
1951 }
1952 
1953 static const char *threshold_unit(u32 threshold)
1954 {
1955 	return threshold ? "" : "(disabled)";
1956 }
1957 
1958 /**
1959  * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1960  * @gt: the &xe_gt
1961  * @vfid: the VF identifier
1962  * @index: the threshold index
1963  * @value: requested value (0 means disabled)
1964  *
1965  * This function can only be called on PF.
1966  *
1967  * Return: 0 on success or a negative error code on failure.
1968  */
1969 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1970 					enum xe_guc_klv_threshold_index index, u32 value)
1971 {
1972 	u32 key = xe_guc_klv_threshold_index_to_key(index);
1973 	const char *name = xe_guc_klv_key_to_string(key);
1974 	int err;
1975 
1976 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1977 	err = pf_provision_threshold(gt, vfid, index, value);
1978 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1979 
1980 	return pf_config_set_u32_done(gt, vfid, value,
1981 				      xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1982 				      name, threshold_unit, err);
1983 }
1984 
1985 /**
1986  * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1987  * @gt: the &xe_gt
1988  * @vfid: the VF identifier
1989  * @index: the threshold index
1990  *
1991  * This function can only be called on PF.
1992  *
1993  * Return: value of VF's (or PF's) threshold.
1994  */
1995 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1996 					enum xe_guc_klv_threshold_index index)
1997 {
1998 	u32 value;
1999 
2000 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2001 	value = pf_get_threshold(gt, vfid, index);
2002 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2003 
2004 	return value;
2005 }
2006 
2007 static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
2008 {
2009 	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
2010 
2011 #define reset_threshold_config(TAG, ...) ({				\
2012 	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0;	\
2013 });
2014 
2015 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
2016 #undef reset_threshold_config
2017 }
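/*
 * For reference, a sketch of how the X-macro above expands, assuming the
 * thresholds set defines tags such as CAT_ERR (tag name illustrative only):
 *
 *	config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(CAT_ERR)] = 0;
 *	...one such assignment per threshold defined in the set...
 */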
2018 
2019 static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
2020 {
2021 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2022 	struct xe_device *xe = gt_to_xe(gt);
2023 
2024 	if (xe_gt_is_main_type(gt)) {
2025 		pf_release_vf_config_ggtt(gt, config);
2026 		if (IS_DGFX(xe)) {
2027 			pf_release_vf_config_lmem(gt, config);
2028 			if (xe_device_has_lmtt(xe))
2029 				pf_update_vf_lmtt(xe, vfid);
2030 		}
2031 	}
2032 	pf_release_config_ctxs(gt, config);
2033 	pf_release_config_dbs(gt, config);
2034 	pf_reset_config_sched(gt, config);
2035 	pf_reset_config_thresholds(gt, config);
2036 }
2037 
2038 /**
2039  * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
2040  * @gt: the &xe_gt
2041  * @vfid: the VF identifier (can't be PF)
2042  * @force: force configuration release
2043  *
2044  * This function can only be called on PF.
2045  *
2046  * Return: 0 on success or a negative error code on failure.
2047  */
2048 int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
2049 {
2050 	int err;
2051 
2052 	xe_gt_assert(gt, vfid);
2053 
2054 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2055 	err = pf_send_vf_cfg_reset(gt, vfid);
2056 	if (!err || force)
2057 		pf_release_vf_config(gt, vfid);
2058 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2059 
2060 	if (unlikely(err)) {
2061 		xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
2062 				   vfid, ERR_PTR(err),
2063 				   force ? " but all resources were released anyway!" : "");
2064 	}
2065 
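	/* with @force the resources were released anyway, so report success */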
2066 	return force ? 0 : err;
2067 }
2068 
2069 static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
2070 {
2071 	if (xe_ggtt_node_allocated(ggtt_region))
2072 		xe_ggtt_assign(ggtt_region, vfid);
2073 }
2074 
2075 static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
2076 {
2077 	struct xe_migrate *m = tile->migrate;
2078 	struct dma_fence *fence;
2079 	int err;
2080 
2081 	if (!bo)
2082 		return 0;
2083 
2084 	xe_bo_lock(bo, false);
2085 	fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
2086 	if (IS_ERR(fence)) {
2087 		err = PTR_ERR(fence);
2088 	} else if (!fence) {
2089 		err = -ENOMEM;
2090 	} else {
2091 		long ret = dma_fence_wait_timeout(fence, false, timeout);
2092 
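		/* ret > 0: jiffies left (success), 0: timed out, < 0: error */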
2093 		err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
2094 		dma_fence_put(fence);
2095 		if (!err)
2096 			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
2097 						jiffies_to_msecs(timeout - ret));
2098 	}
2099 	xe_bo_unlock(bo);
2100 
2101 	return err;
2102 }
2103 
2104 static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
2105 {
2106 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
2107 	struct xe_tile *tile = gt_to_tile(gt);
2108 	struct xe_device *xe = gt_to_xe(gt);
2109 	int err = 0;
2110 
2111 	/*
2112 	 * Only GGTT and LMEM need to be cleared by the PF.
2113 	 * GuC doorbell IDs and context IDs do not need any clearing.
2114 	 */
2115 	if (xe_gt_is_main_type(gt)) {
2116 		pf_sanitize_ggtt(config->ggtt_region, vfid);
2117 		if (IS_DGFX(xe))
2118 			err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
2119 	}
2120 
2121 	return err;
2122 }
2123 
2124 /**
2125  * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
2126  * @gt: the &xe_gt
2127  * @vfid: the VF identifier (can't be PF)
2128  * @timeout: maximum timeout to wait for completion in jiffies
2129  *
2130  * This function can only be called on PF.
2131  *
2132  * Return: 0 on success or a negative error code on failure.
2133  */
2134 int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
2135 {
2136 	int err;
2137 
2138 	xe_gt_assert(gt, vfid != PFID);
2139 
2140 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2141 	err = pf_sanitize_vf_resources(gt, vfid, timeout);
2142 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2143 
2144 	if (unlikely(err))
2145 		xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
2146 				   vfid, ERR_PTR(err));
2147 	return err;
2148 }
2149 
2150 /**
2151  * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
2152  * @gt: the &xe_gt
2153  * @vfid: the VF identifier (can't be PF)
2154  * @refresh: explicit refresh
2155  *
2156  * This function can only be called on PF.
2157  *
2158  * Return: 0 on success or a negative error code on failure.
2159  */
2160 int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
2161 {
2162 	int err = 0;
2163 
2164 	xe_gt_assert(gt, vfid);
2165 
2166 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2167 	err = pf_push_vf_cfg(gt, vfid, refresh);
2168 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2169 
2170 	if (unlikely(err)) {
2171 		xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2172 				   refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2173 	}
2174 
2175 	return err;
2176 }
2177 
2178 static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2179 {
2180 	struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2181 	struct xe_device *xe = gt_to_xe(gt);
2182 	bool is_primary = xe_gt_is_main_type(gt);
2183 	bool valid_ggtt, valid_ctxs, valid_dbs;
2184 	bool valid_any, valid_all;
2185 
2186 	valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2187 	valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2188 	valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2189 
2190 	/* note that GuC doorbells are optional */
2191 	valid_any = valid_ctxs || valid_dbs;
2192 	valid_all = valid_ctxs;
2193 
2194 	/* and GGTT/LMEM is configured on primary GT only */
2195 	valid_all = valid_all && valid_ggtt;
2196 	valid_any = valid_any || (valid_ggtt && is_primary);
2197 
2198 	if (xe_device_has_lmtt(xe)) {
2199 		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2200 
2201 		valid_any = valid_any || (valid_lmem && is_primary);
2202 		valid_all = valid_all && valid_lmem;
2203 	}
2204 
2205 	return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
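	/* 0: fully provisioned, -ENOKEY: partially provisioned, -ENODATA: empty */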
2206 }
2207 
2208 /**
2209  * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2210  * @gt: the &xe_gt
2211  * @vfid: the VF identifier (can't be PF)
2212  *
2213  * This function can only be called on PF.
2214  *
2215  * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2216  */
2217 bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2218 {
2219 	bool empty;
2220 
2221 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2222 	xe_gt_assert(gt, vfid);
2223 
2224 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2225 	empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2226 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2227 
2228 	return empty;
2229 }
2230 
2231 /**
2232  * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
2233  * @gt: the &xe_gt
2234  * @vfid: the VF identifier (can't be PF)
2235  * @buf: the buffer to save a config to (or NULL if query the buf size)
2236  * @size: the size of the buffer (or 0 if query the buf size)
2237  *
2238  * This function can only be called on PF.
2239  *
2240  * Return: minimum size of the buffer or the number of bytes saved,
2241  *         or a negative error code on failure.
2242  */
2243 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
2244 {
2245 	struct xe_gt_sriov_config *config;
2246 	ssize_t ret;
2247 
2248 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2249 	xe_gt_assert(gt, vfid);
2250 	xe_gt_assert(gt, !(!buf ^ !size));
2251 
2252 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2253 	ret = pf_validate_vf_config(gt, vfid);
2254 	if (!size) {
2255 		ret = ret ? 0 : SZ_4K;
2256 	} else if (!ret) {
2257 		if (size < SZ_4K) {
2258 			ret = -ENOBUFS;
2259 		} else {
2260 			config = pf_pick_vf_config(gt, vfid);
2261 			ret = encode_config(buf, config, false) * sizeof(u32);
2262 		}
2263 	}
2264 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2265 
2266 	return ret;
2267 }
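/*
 * Illustrative two-step use (hypothetical caller sketch, not part of this
 * file): first query the required buffer size, then save the actual config:
 *
 *	ssize_t size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
 *
 *	if (size > 0) {
 *		void *buf = kmalloc(size, GFP_KERNEL);
 *
 *		if (buf)
 *			size = xe_gt_sriov_pf_config_save(gt, vfid, buf, size);
 *	}
 */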
2268 
2269 static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
2270 				    u32 key, u32 len, const u32 *value)
2271 {
2272 	switch (key) {
2273 	case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
2274 		if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
2275 			return -EBADMSG;
2276 		return pf_provision_vf_ctxs(gt, vfid, value[0]);
2277 
2278 	case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
2279 		if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
2280 			return -EBADMSG;
2281 		return pf_provision_vf_dbs(gt, vfid, value[0]);
2282 
2283 	case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
2284 		if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
2285 			return -EBADMSG;
2286 		return pf_provision_exec_quantum(gt, vfid, value[0]);
2287 
2288 	case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
2289 		if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
2290 			return -EBADMSG;
2291 		return pf_provision_preempt_timeout(gt, vfid, value[0]);
2292 
2293 	/* auto-generate case statements */
2294 #define define_threshold_key_to_provision_case(TAG, ...)				\
2295 	case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG):					\
2296 		BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u);		\
2297 		if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG))			\
2298 			return -EBADMSG;						\
2299 		return pf_provision_threshold(gt, vfid,					\
2300 					      MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG),	\
2301 					      value[0]);
2302 
2303 	MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
2304 #undef define_threshold_key_to_provision_case
2305 	}
2306 
2307 	if (xe_gt_is_media_type(gt))
2308 		return -EKEYREJECTED;
2309 
2310 	switch (key) {
2311 	case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
2312 		if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
2313 			return -EBADMSG;
2314 		return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
2315 
2316 	case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
2317 		if (!IS_DGFX(gt_to_xe(gt)))
2318 			return -EKEYREJECTED;
2319 		if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
2320 			return -EBADMSG;
2321 		return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
2322 	}
2323 
2324 	return -EKEYREJECTED;
2325 }
2326 
2327 static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
2328 				const u32 *klvs, size_t num_dwords)
2329 {
2330 	int err;
2331 
2332 	while (num_dwords >= GUC_KLV_LEN_MIN) {
2333 		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
2334 		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
2335 
2336 		klvs += GUC_KLV_LEN_MIN;
2337 		num_dwords -= GUC_KLV_LEN_MIN;
2338 
2339 		if (num_dwords < len)
2340 			err = -EBADMSG;
2341 		else
2342 			err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
2343 
2344 		if (err) {
2345 			xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
2346 			return err;
2347 		}
2348 
2349 		klvs += len;
2350 		num_dwords -= len;
2351 	}
2352 
2353 	return pf_validate_vf_config(gt, vfid);
2354 }
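/*
 * KLV stream layout sketch (values illustrative): each entry is a one-dword
 * header with KEY and LEN fields, followed by LEN value dwords. For a 64-bit
 * size KLV the low dword comes first, matching make_u64_from_u32(value[1],
 * value[0]) in the parser above:
 *
 *	[ FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
 *	  lower_32_bits(size), upper_32_bits(size) ]
 */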
2355 
2356 /**
2357  * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
2358  * @gt: the &xe_gt
2359  * @vfid: the VF identifier (can't be PF)
2360  * @buf: the buffer with config data
2361  * @size: the size of the config data
2362  *
2363  * This function can only be called on PF.
2364  *
2365  * Return: 0 on success or a negative error code on failure.
2366  */
2367 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
2368 				  const void *buf, size_t size)
2369 {
2370 	int err;
2371 
2372 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2373 	xe_gt_assert(gt, vfid);
2374 
2375 	if (!size)
2376 		return -ENODATA;
2377 
2378 	if (size % sizeof(u32))
2379 		return -EINVAL;
2380 
2381 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
2382 		struct drm_printer p = xe_gt_dbg_printer(gt);
2383 
2384 		drm_printf(&p, "restoring VF%u config:\n", vfid);
2385 		xe_guc_klv_print(buf, size / sizeof(u32), &p);
2386 	}
2387 
2388 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2389 	err = pf_send_vf_cfg_reset(gt, vfid);
2390 	if (!err) {
2391 		pf_release_vf_config(gt, vfid);
2392 		err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
2393 	}
2394 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2395 
2396 	return err;
2397 }
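/*
 * Together with xe_gt_sriov_pf_config_save() this allows a save/restore
 * round-trip, sketched here for a hypothetical migration path (src_gt,
 * dst_gt, buf and bufsize are illustrative names only):
 *
 *	size = xe_gt_sriov_pf_config_save(src_gt, vfid, buf, bufsize);
 *	if (size > 0)
 *		err = xe_gt_sriov_pf_config_restore(dst_gt, vfid, buf, size);
 */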
2398 
2399 static void pf_prepare_self_config(struct xe_gt *gt)
2400 {
2401 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID);
2402 
2403 	/*
2404 	 * We want the PF to be allowed to use all context IDs, all doorbell
2405 	 * IDs and the whole usable GGTT area. While we can store the ctxs/dbs
2406 	 * numbers directly in the config structure, we can't do the same with
2407 	 * the GGTT configuration, so it is prepared on demand while pushing KLVs.
2408 	 */
2409 	config->num_ctxs = GUC_ID_MAX;
2410 	config->num_dbs = GUC_NUM_DOORBELLS;
2411 }
2412 
2413 static int pf_push_self_config(struct xe_gt *gt)
2414 {
2415 	int err;
2416 
2417 	err = pf_push_full_vf_config(gt, PFID);
2418 	if (err) {
2419 		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
2420 				ERR_PTR(err));
2421 		return err;
2422 	}
2423 
2424 	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
2425 	return 0;
2426 }
2427 
2428 static void fini_config(void *arg)
2429 {
2430 	struct xe_gt *gt = arg;
2431 	struct xe_device *xe = gt_to_xe(gt);
2432 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
2433 
2434 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2435 	for (n = 1; n <= total_vfs; n++)
2436 		pf_release_vf_config(gt, n);
2437 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2438 }
2439 
2440 /**
2441  * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
2442  * @gt: the &xe_gt
2443  *
2444  * This function can only be called on PF.
2445  *
2446  * Return: 0 on success or a negative error code on failure.
2447  */
2448 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
2449 {
2450 	struct xe_device *xe = gt_to_xe(gt);
2451 	int err;
2452 
2453 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
2454 
2455 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2456 	pf_prepare_self_config(gt);
2457 	err = pf_push_self_config(gt);
2458 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2459 
2460 	if (err)
2461 		return err;
2462 
2463 	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
2464 }
2465 
2466 /**
2467  * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2468  * @gt: the &xe_gt
2469  *
2470  * Any prior configurations pushed to GuC are lost when the GT is reset.
2471  * Push again all non-empty VF configurations to the GuC.
2472  *
2473  * This function can only be called on PF.
2474  */
2475 void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2476 {
2477 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2478 	unsigned int fail = 0, skip = 0;
2479 
2480 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2481 	pf_push_self_config(gt);
2482 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2483 
2484 	for (n = 1; n <= total_vfs; n++) {
2485 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
2486 			skip++;
2487 		else if (xe_gt_sriov_pf_config_push(gt, n, false))
2488 			fail++;
2489 	}
2490 
2491 	if (fail)
2492 		xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2493 				   fail, total_vfs - skip, str_plural(total_vfs - skip));
2494 
2495 	if (fail != total_vfs)
2496 		xe_gt_sriov_dbg(gt, "pushed %u, skipped %u of %u VF%s configurations\n",
2497 				total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2498 }
2499 
2500 /**
2501  * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2502  * @gt: the &xe_gt
2503  * @p: the &drm_printer
2504  *
2505  * Print GGTT configuration data for all VFs.
2506  * VFs without provisioned GGTT are ignored.
2507  *
2508  * This function can only be called on PF.
2509  */
2510 int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2511 {
2512 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2513 	const struct xe_gt_sriov_config *config;
2514 	char buf[10];
2515 
2516 	for (n = 1; n <= total_vfs; n++) {
2517 		config = &gt->sriov.pf.vfs[n].config;
2518 		if (!xe_ggtt_node_allocated(config->ggtt_region))
2519 			continue;
2520 
2521 		string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2522 				buf, sizeof(buf));
2523 		drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2524 			   n, config->ggtt_region->base.start,
2525 			   config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2526 			   buf);
2527 	}
2528 
2529 	return 0;
2530 }
2531 
2532 /**
2533  * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2534  * @gt: the &xe_gt
2535  * @p: the &drm_printer
2536  *
2537  * Print GuC context ID allocations across all VFs.
2538  * VFs without GuC context IDs are skipped.
2539  *
2540  * This function can only be called on PF.
2541  * Return: 0 on success or a negative error code on failure.
2542  */
2543 int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2544 {
2545 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2546 	const struct xe_gt_sriov_config *config;
2547 
2548 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2549 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2550 
2551 	for (n = 1; n <= total_vfs; n++) {
2552 		config = &gt->sriov.pf.vfs[n].config;
2553 		if (!config->num_ctxs)
2554 			continue;
2555 
2556 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2557 			   n,
2558 			   config->begin_ctx,
2559 			   config->begin_ctx + config->num_ctxs - 1,
2560 			   config->num_ctxs);
2561 	}
2562 
2563 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2564 	return 0;
2565 }
2566 
2567 /**
2568  * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2569  * @gt: the &xe_gt
2570  * @p: the &drm_printer
2571  *
2572  * Print GuC doorbell IDs allocations across all VFs.
2573  * VFs without GuC doorbell IDs are skipped.
2574  *
2575  * This function can only be called on PF.
2576  * Return: 0 on success or a negative error code on failure.
2577  */
2578 int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2579 {
2580 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2581 	const struct xe_gt_sriov_config *config;
2582 
2583 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2584 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2585 
2586 	for (n = 1; n <= total_vfs; n++) {
2587 		config = &gt->sriov.pf.vfs[n].config;
2588 		if (!config->num_dbs)
2589 			continue;
2590 
2591 		drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2592 			   n,
2593 			   config->begin_db,
2594 			   config->begin_db + config->num_dbs - 1,
2595 			   config->num_dbs);
2596 	}
2597 
2598 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2599 	return 0;
2600 }
2601 
2602 /**
2603  * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
2604  * @gt: the &xe_gt
2605  * @p: the &drm_printer
2606  *
2607  * Print LMEM allocations across all VFs.
2608  * VFs without LMEM allocation are skipped.
2609  *
2610  * This function can only be called on PF.
2611  * Return: 0 on success or a negative error code on failure.
2612  */
2613 int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
2614 {
2615 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2616 	const struct xe_gt_sriov_config *config;
2617 	char buf[10];
2618 
2619 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2620 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2621 
2622 	for (n = 1; n <= total_vfs; n++) {
2623 		config = &gt->sriov.pf.vfs[n].config;
2624 		if (!config->lmem_obj)
2625 			continue;
2626 
2627 		string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2,
2628 				buf, sizeof(buf));
2629 		drm_printf(p, "VF%u:\t%zu\t(%s)\n",
2630 			   n, xe_bo_size(config->lmem_obj), buf);
2631 	}
2632 
2633 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2634 	return 0;
2635 }
2636 
2637 /**
2638  * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2639  * @gt: the &xe_gt
2640  * @p: the &drm_printer
2641  *
2642  * Print GGTT ranges that are available for the provisioning.
2643  *
2644  * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2645  */
2646 int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2647 {
2648 	struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2649 	u64 alignment = pf_get_ggtt_alignment(gt);
2650 	u64 spare, avail, total;
2651 	char buf[10];
2652 
2653 	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2654 
2655 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2656 
2657 	spare = pf_get_spare_ggtt(gt);
2658 	total = xe_ggtt_print_holes(ggtt, alignment, p);
2659 
2660 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2661 
2662 	string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2663 	drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2664 
2665 	string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2666 	drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2667 
2668 	avail = total > spare ? total - spare : 0;
2669 
2670 	string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2671 	drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2672 
2673 	return 0;
2674 }
2675