// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"

static void pf_worker_restart_func(struct work_struct *w);

/*
 * VFs' metadata is maintained in a flexible array where:
 *   - entry [0] contains metadata for the PF (only if applicable),
 *   - entries [1..n] contain metadata for VF1..VFn::
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
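/* e.g. the metadata for VF2 lives in gt->sriov.pf.vfs[2], while slot [0] is reserved for the PF */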
static int pf_alloc_metadata(struct xe_gt *gt)
{
	unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);

	gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
					sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
	if (!gt->sriov.pf.vfs)
		return -ENOMEM;

	return 0;
}

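/* Set up the PF workers; currently this is only the deferred GT restart worker. */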
static void pf_init_workers(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}

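/* Tear down the PF workers: cancel a pending restart and wait for a running one to finish. */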
static void pf_fini_workers(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	disable_work_sync(&gt->sriov.pf.workers.restart);
}

/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	pf_init_workers(gt);

	return 0;
}

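/* devm release action that tears down the PF workers when the device is removed. */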
static void pf_fini_action(void *arg)
{
	struct xe_gt *gt = arg;

	pf_fini_workers(gt);
}

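/* Register a driver-managed action to stop the PF workers on device removal. */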
static int pf_init_late(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));
	return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
}

/**
 * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Late one-time initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_migration_init(gt);
	if (err)
		return err;

	err = pf_init_late(gt);
	if (err)
		return err;

	return 0;
}

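/* Only graphics version 12.00 platforms need the PF to explicitly allow GGTT updates from VFs. */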
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) == 1200;
}

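/* Let the VFs update the GGTT by setting GUEST_GTT_UPDATE_EN in VIRTUAL_CTRL_REG. */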
static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
	xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}

/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
}

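/* Address stride between consecutive VFs' register copies; newer platforms use a denser 0x400 layout. */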
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
}

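/* Translate a VF register into the PF-accessible copy that belongs to the given VF. */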
static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
{
	struct xe_reg pf_reg = vf_reg;

	pf_reg.vf = 0;
	pf_reg.addr += stride * vfid;

	return pf_reg;
}

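/* Zero all software scratch (flag) registers of the given VF, using the media or primary GT set. */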
static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
	struct xe_reg scratch;
	int n, count;

	if (xe_gt_is_media_type(gt)) {
		count = MED_VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	} else {
		count = VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	}
}

/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}

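/* Cancel a queued restart worker; the debug message is printed only if one was actually pending. */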
static void pf_cancel_restart(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	if (cancel_work_sync(&gt->sriov.pf.workers.restart))
		xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
}

/**
 * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
{
	pf_cancel_restart(gt);
}

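/* Do the actual restart: refresh the PF config and control state while holding a runtime PM reference. */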
static void pf_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_pm_runtime_get(xe);
	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);
	xe_pm_runtime_put(xe);

	xe_gt_sriov_dbg(gt, "restart completed\n");
}

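/* Worker entry point: resolve the owning GT and run the restart asynchronously. */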
static void pf_worker_restart_func(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);

	pf_restart(gt);
}

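/* Queue the restart worker on the SR-IOV workqueue; at most one instance can be pending at a time. */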
static void pf_queue_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
		xe_gt_sriov_dbg(gt, "restart already in queue!\n");
}

/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	pf_queue_restart(gt);
}

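/* Wait for a queued or already running restart worker to finish. */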
static void pf_flush_restart(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	flush_work(&gt->sriov.pf.workers.restart);
}

/**
 * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
{
	/* don't wait if there is another ongoing reset */
	if (xe_guc_read_stopped(&gt->uc.guc))
		return -EBUSY;

	pf_flush_restart(gt);
	return 0;
}