// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */
51f2880baSMichal Wajdeczko
61f2880baSMichal Wajdeczko #include <drm/drm_managed.h>
71f2880baSMichal Wajdeczko
813a48a0fSMichal Wajdeczko #include "regs/xe_guc_regs.h"
9466a6c38SMichal Wajdeczko #include "regs/xe_regs.h"
10d6c5bac8SMichal Wajdeczko
1113a48a0fSMichal Wajdeczko #include "xe_gt.h"
121f2880baSMichal Wajdeczko #include "xe_gt_sriov_pf.h"
1341122080SMichal Wajdeczko #include "xe_gt_sriov_pf_config.h"
142bd87f0fSMichal Wajdeczko #include "xe_gt_sriov_pf_control.h"
151f2880baSMichal Wajdeczko #include "xe_gt_sriov_pf_helpers.h"
16d86e3737SMichal Wajdeczko #include "xe_gt_sriov_pf_migration.h"
17e77dff51SMichal Wajdeczko #include "xe_gt_sriov_pf_service.h"
18a4d1c5d0SMichal Wajdeczko #include "xe_gt_sriov_printk.h"
19*cb7a3f94SMichal Wajdeczko #include "xe_guc_submit.h"
20d6c5bac8SMichal Wajdeczko #include "xe_mmio.h"
21a4d1c5d0SMichal Wajdeczko #include "xe_pm.h"
22a4d1c5d0SMichal Wajdeczko
23a4d1c5d0SMichal Wajdeczko static void pf_worker_restart_func(struct work_struct *w);
241f2880baSMichal Wajdeczko
/*
 * VF's metadata is maintained in the flexible array where:
 * - entry [0] contains metadata for the PF (only if applicable),
 * - entries [1..n] contain metadata for VF1..VFn::
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
pf_alloc_metadata(struct xe_gt * gt)371f2880baSMichal Wajdeczko static int pf_alloc_metadata(struct xe_gt *gt)
381f2880baSMichal Wajdeczko {
391f2880baSMichal Wajdeczko unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);
401f2880baSMichal Wajdeczko
411f2880baSMichal Wajdeczko gt->sriov.pf.vfs = drmm_kcalloc(>_to_xe(gt)->drm, 1 + num_vfs,
421f2880baSMichal Wajdeczko sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
431f2880baSMichal Wajdeczko if (!gt->sriov.pf.vfs)
441f2880baSMichal Wajdeczko return -ENOMEM;
451f2880baSMichal Wajdeczko
461f2880baSMichal Wajdeczko return 0;
471f2880baSMichal Wajdeczko }
481f2880baSMichal Wajdeczko
pf_init_workers(struct xe_gt * gt)49a4d1c5d0SMichal Wajdeczko static void pf_init_workers(struct xe_gt *gt)
50a4d1c5d0SMichal Wajdeczko {
51c286ce6bSMichal Wajdeczko xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
52a4d1c5d0SMichal Wajdeczko INIT_WORK(>->sriov.pf.workers.restart, pf_worker_restart_func);
53a4d1c5d0SMichal Wajdeczko }
54a4d1c5d0SMichal Wajdeczko
pf_fini_workers(struct xe_gt * gt)55c286ce6bSMichal Wajdeczko static void pf_fini_workers(struct xe_gt *gt)
56c286ce6bSMichal Wajdeczko {
57c286ce6bSMichal Wajdeczko xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
58c286ce6bSMichal Wajdeczko disable_work_sync(>->sriov.pf.workers.restart);
59c286ce6bSMichal Wajdeczko }
60c286ce6bSMichal Wajdeczko
/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	pf_init_workers(gt);

	return 0;
}
89d6c5bac8SMichal Wajdeczko
/* devm cleanup action: stop PF workers when the device is torn down. */
static void pf_fini_action(void *arg)
{
	struct xe_gt *gt = arg;

	pf_fini_workers(gt);
}
96c286ce6bSMichal Wajdeczko
pf_init_late(struct xe_gt * gt)97c286ce6bSMichal Wajdeczko static int pf_init_late(struct xe_gt *gt)
98c286ce6bSMichal Wajdeczko {
99c286ce6bSMichal Wajdeczko struct xe_device *xe = gt_to_xe(gt);
100c286ce6bSMichal Wajdeczko
101c286ce6bSMichal Wajdeczko xe_gt_assert(gt, IS_SRIOV_PF(xe));
102c286ce6bSMichal Wajdeczko return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
103c286ce6bSMichal Wajdeczko }
104c286ce6bSMichal Wajdeczko
/**
 * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Late one-time initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_migration_init(gt);
	if (err)
		return err;

	err = pf_init_late(gt);
	if (err)
		return err;

	return 0;
}
1319ebb5846SMichal Wajdeczko
pf_needs_enable_ggtt_guest_update(struct xe_device * xe)132d6c5bac8SMichal Wajdeczko static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
133d6c5bac8SMichal Wajdeczko {
134d6c5bac8SMichal Wajdeczko return GRAPHICS_VERx100(xe) == 1200;
135d6c5bac8SMichal Wajdeczko }
136d6c5bac8SMichal Wajdeczko
pf_enable_ggtt_guest_update(struct xe_gt * gt)137d6c5bac8SMichal Wajdeczko static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
138d6c5bac8SMichal Wajdeczko {
139f9bcd59aSMatt Roper xe_mmio_write32(>->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
140d6c5bac8SMichal Wajdeczko }
141d6c5bac8SMichal Wajdeczko
/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF's access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
}
15541122080SMichal Wajdeczko
pf_get_vf_regs_stride(struct xe_device * xe)15613a48a0fSMichal Wajdeczko static u32 pf_get_vf_regs_stride(struct xe_device *xe)
15713a48a0fSMichal Wajdeczko {
15813a48a0fSMichal Wajdeczko return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
15913a48a0fSMichal Wajdeczko }
16013a48a0fSMichal Wajdeczko
/* Translate a VF-relative register into the PF's view of that VF's copy. */
static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
{
	struct xe_reg pf_reg = vf_reg;

	pf_reg.vf = 0;
	pf_reg.addr += stride * vfid;

	return pf_reg;
}
17013a48a0fSMichal Wajdeczko
pf_clear_vf_scratch_regs(struct xe_gt * gt,unsigned int vfid)17113a48a0fSMichal Wajdeczko static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
17213a48a0fSMichal Wajdeczko {
17313a48a0fSMichal Wajdeczko u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
17413a48a0fSMichal Wajdeczko struct xe_reg scratch;
17513a48a0fSMichal Wajdeczko int n, count;
17613a48a0fSMichal Wajdeczko
17713a48a0fSMichal Wajdeczko if (xe_gt_is_media_type(gt)) {
17813a48a0fSMichal Wajdeczko count = MED_VF_SW_FLAG_COUNT;
17913a48a0fSMichal Wajdeczko for (n = 0; n < count; n++) {
18013a48a0fSMichal Wajdeczko scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
181f9bcd59aSMatt Roper xe_mmio_write32(>->mmio, scratch, 0);
18213a48a0fSMichal Wajdeczko }
18313a48a0fSMichal Wajdeczko } else {
18413a48a0fSMichal Wajdeczko count = VF_SW_FLAG_COUNT;
18513a48a0fSMichal Wajdeczko for (n = 0; n < count; n++) {
18613a48a0fSMichal Wajdeczko scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
187f9bcd59aSMatt Roper xe_mmio_write32(>->mmio, scratch, 0);
18813a48a0fSMichal Wajdeczko }
18913a48a0fSMichal Wajdeczko }
19013a48a0fSMichal Wajdeczko }
19113a48a0fSMichal Wajdeczko
/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}
20513a48a0fSMichal Wajdeczko
pf_cancel_restart(struct xe_gt * gt)2069f50b729SMichal Wajdeczko static void pf_cancel_restart(struct xe_gt *gt)
2079f50b729SMichal Wajdeczko {
2089f50b729SMichal Wajdeczko xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2099f50b729SMichal Wajdeczko
2109f50b729SMichal Wajdeczko if (cancel_work_sync(>->sriov.pf.workers.restart))
2119f50b729SMichal Wajdeczko xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
2129f50b729SMichal Wajdeczko }
2139f50b729SMichal Wajdeczko
/**
 * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
{
	pf_cancel_restart(gt);
}
2249f50b729SMichal Wajdeczko
/* Re-push VF configurations and resume control state after a GT reset. */
static void pf_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	/* keep the device awake for the whole restart sequence */
	xe_pm_runtime_get(xe);
	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);
	xe_pm_runtime_put(xe);

	xe_gt_sriov_dbg(gt, "restart completed\n");
}
236a4d1c5d0SMichal Wajdeczko
pf_worker_restart_func(struct work_struct * w)237a4d1c5d0SMichal Wajdeczko static void pf_worker_restart_func(struct work_struct *w)
238a4d1c5d0SMichal Wajdeczko {
239a4d1c5d0SMichal Wajdeczko struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);
240a4d1c5d0SMichal Wajdeczko
241a4d1c5d0SMichal Wajdeczko pf_restart(gt);
242a4d1c5d0SMichal Wajdeczko }
243a4d1c5d0SMichal Wajdeczko
pf_queue_restart(struct xe_gt * gt)244a4d1c5d0SMichal Wajdeczko static void pf_queue_restart(struct xe_gt *gt)
245a4d1c5d0SMichal Wajdeczko {
246a4d1c5d0SMichal Wajdeczko struct xe_device *xe = gt_to_xe(gt);
247a4d1c5d0SMichal Wajdeczko
248a4d1c5d0SMichal Wajdeczko xe_gt_assert(gt, IS_SRIOV_PF(xe));
249a4d1c5d0SMichal Wajdeczko
250a4d1c5d0SMichal Wajdeczko if (!queue_work(xe->sriov.wq, >->sriov.pf.workers.restart))
251a4d1c5d0SMichal Wajdeczko xe_gt_sriov_dbg(gt, "restart already in queue!\n");
252a4d1c5d0SMichal Wajdeczko }
253a4d1c5d0SMichal Wajdeczko
/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	pf_queue_restart(gt);
}
264*cb7a3f94SMichal Wajdeczko
pf_flush_restart(struct xe_gt * gt)265*cb7a3f94SMichal Wajdeczko static void pf_flush_restart(struct xe_gt *gt)
266*cb7a3f94SMichal Wajdeczko {
267*cb7a3f94SMichal Wajdeczko xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
268*cb7a3f94SMichal Wajdeczko flush_work(>->sriov.pf.workers.restart);
269*cb7a3f94SMichal Wajdeczko }
270*cb7a3f94SMichal Wajdeczko
271*cb7a3f94SMichal Wajdeczko /**
272*cb7a3f94SMichal Wajdeczko * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
273*cb7a3f94SMichal Wajdeczko * @gt: the &xe_gt
274*cb7a3f94SMichal Wajdeczko *
275*cb7a3f94SMichal Wajdeczko * This function can only be called on PF.
276*cb7a3f94SMichal Wajdeczko *
277*cb7a3f94SMichal Wajdeczko * Return: 0 on success or a negative error code on failure.
278*cb7a3f94SMichal Wajdeczko */
xe_gt_sriov_pf_wait_ready(struct xe_gt * gt)279*cb7a3f94SMichal Wajdeczko int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
280*cb7a3f94SMichal Wajdeczko {
281*cb7a3f94SMichal Wajdeczko /* don't wait if there is another ongoing reset */
282*cb7a3f94SMichal Wajdeczko if (xe_guc_read_stopped(>->uc.guc))
283*cb7a3f94SMichal Wajdeczko return -EBUSY;
284*cb7a3f94SMichal Wajdeczko
285*cb7a3f94SMichal Wajdeczko pf_flush_restart(gt);
286*cb7a3f94SMichal Wajdeczko return 0;
287*cb7a3f94SMichal Wajdeczko }
288