// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/string_choices.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"

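/* Auto-provision only if no VF in 1..num_vfs already has a non-empty config */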
static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
{
	unsigned int n;

	for (n = 1; n <= num_vfs; n++)
		if (!xe_gt_sriov_pf_config_is_empty(gt, n))
			return false;

	return true;
}

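/*
 * Apply the default "fair" VF provisioning on each GT, unless some VFs were
 * already provisioned there (e.g. manually ahead of time).
 * Returns 0 on success or the first error encountered.
 */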
static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0, err;

	for_each_gt(gt, xe, id) {
		if (!pf_needs_provisioning(gt, num_vfs))
			continue;
		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
		result = result ?: err;
	}

	return result;
}

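/* Forcibly release any GT resources assigned to the VFs */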
static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_config_release(gt, n, true);
}

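/* Trigger a Function Level Reset (FLR) of every VF on all GTs */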
static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_control_trigger_flr(gt, n);
}

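/*
 * Lookup the VF's PCI device; note that vf_id is the 0-based VF index
 * used by the PCI core, not the 1-based VFID used elsewhere in Xe.
 */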
static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_assert(xe, IS_SRIOV_PF(xe));

	/* caller must use pci_dev_put() */
	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
			pdev->bus->number,
			pci_iov_virtfn_devfn(pdev, vf_id));
}

static void pf_link_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
	struct device_link *link;
	struct pci_dev *pdev_vf;
	unsigned int n;

	/*
	 * When both PF and VF devices are enabled on the host, they resume
	 * in parallel during system resume.
	 *
	 * But the PF must complete the provisioning of the VFs first to allow
	 * any VF to successfully resume.
	 *
	 * Create a parent-child device link between the PF and VF devices that
	 * will enforce the correct resume order.
	 */
	for (n = 1; n <= num_vfs; n++) {
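		/* convert 1-based VF id to the 0-based index expected by the PCI core */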
		pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);

		/* unlikely, something weird is happening, abort */
		if (!pdev_vf) {
			xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
				     n, str_plural(num_vfs));
			break;
		}

		link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
				       DL_FLAG_AUTOREMOVE_CONSUMER);
		/* unlikely and harmless, continue with other VFs */
		if (!link)
			xe_sriov_notice(xe, "Failed linking VF%u\n", n);

		pci_dev_put(pdev_vf);
	}
}

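/*
 * Provision and enable @num_vfs VFs.
 * Returns the number of enabled VFs or a negative error code on failure.
 */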
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int total_vfs = xe_sriov_pf_get_totalvfs(xe);
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_assert(xe, num_vfs > 0);
	xe_assert(xe, num_vfs <= total_vfs);
	xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	/*
	 * We must hold an additional runtime PM reference to keep the PF in D0
	 * during the VFs' lifetime, as our VFs do not implement the PM
	 * capability.
	 *
	 * With the PF in the D0 state, all VFs will also behave as if in D0.
	 * This will also keep the GuC alive with all the VFs' configurations.
	 *
	 * We will release this additional PM reference in pf_disable_vfs().
	 */
	xe_pm_runtime_get_noresume(xe);

	err = pf_provision_vfs(xe, num_vfs);
	if (err < 0)
		goto failed;

	err = pci_enable_sriov(pdev, num_vfs);
	if (err < 0)
		goto failed;

	pf_link_vfs(xe, num_vfs);

	xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
		      num_vfs, total_vfs, str_plural(total_vfs));
	return num_vfs;

failed:
	pf_unprovision_vfs(xe, num_vfs);
	xe_pm_runtime_put(xe);

	xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
			num_vfs, str_plural(num_vfs), ERR_PTR(err));
	return err;
}

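/*
 * Disable all enabled VFs: undo the PCI SR-IOV enablement, reset and
 * unprovision the VFs, and drop the PM reference taken in pf_enable_vfs().
 */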
static int pf_disable_vfs(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs = pci_num_vf(pdev);

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_sriov_dbg(xe, "disabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);

	pf_reset_vfs(xe, num_vfs);

	pf_unprovision_vfs(xe, num_vfs);

	/* not needed anymore - see pf_enable_vfs() */
	xe_pm_runtime_put(xe);

	xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
	return 0;
}

/**
 * xe_pci_sriov_configure - Configure SR-IOV (enable/disable VFs).
 * @pdev: the &pci_dev
 * @num_vfs: number of VFs to enable or zero to disable all VFs
 *
 * This is the Xe implementation of the struct pci_driver.sriov_configure
 * callback.
 *
 * This callback will be called by the PCI subsystem to enable or disable
 * SR-IOV Virtual Functions (VFs) as requested by the user via the PCI sysfs
 * interface.
 *
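 * For example, the user can enable or disable the VFs from a shell (the PF
 * PCI address below is illustrative)::
 *
 *	echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *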
 * Return: number of configured VFs or a negative error code on failure.
 */
int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int ret;

	if (!IS_SRIOV_PF(xe))
		return -ENODEV;

	if (num_vfs < 0)
		return -EINVAL;

	if (num_vfs > xe_sriov_pf_get_totalvfs(xe))
		return -ERANGE;

	if (num_vfs && pci_num_vf(pdev))
		return -EBUSY;

	xe_pm_runtime_get(xe);
	if (num_vfs > 0)
		ret = pf_enable_vfs(xe, num_vfs);
	else
		ret = pf_disable_vfs(xe);
	xe_pm_runtime_put(xe);

	return ret;
}