// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/ratelimit.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"

#define ADF_VF2PF_RATELIMIT_INTERVAL	8
#define ADF_VF2PF_RATELIMIT_BURST	130
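
/*
 * Each VF gets a ratelimit_state seeded with the two values above, i.e. a
 * burst of up to 130 VF2PF messages per 8-jiffy interval (see
 * adf_enable_sriov() below). A minimal sketch of how a message path can use
 * it to shed a misbehaving VF (illustrative only; the real check lives in
 * the PF interrupt handling code):
 *
 *	if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
 *		dev_info(&GET_DEV(accel_dev),
 *			 "Too many ints from VF%d, dropping\n", vf_info->vf_nr);
 *		return;
 *	}
 */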

static struct workqueue_struct *pf2vf_resp_wq;

struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};

/* Deferred handler for a VF2PF message, run from the PF2VF response workqueue */
static void adf_iov_send_resp(struct work_struct *work)
{
	struct adf_pf2vf_resp *pf2vf_resp =
		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
	struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	u32 vf_nr = vf_info->vf_nr;
	bool ret;

	mutex_lock(&vf_info->pfvf_mig_lock);
	ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
	if (ret)
		/* re-enable interrupt on PF from this VF */
		adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
	mutex_unlock(&vf_info->pfvf_mig_lock);

	kfree(pf2vf_resp);
}

/* Queue deferred handling of a VF2PF message; safe to call from IRQ context */
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}
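
/*
 * Example (illustrative sketch, not part of this file): the PF interrupt
 * handler is expected to mask the interrupting VFs and defer the message
 * handling to process context, roughly:
 *
 *	for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
 *		vf_info = accel_dev->pf.vf_info + i;
 *		adf_schedule_vf2pf_handler(vf_info);
 *	}
 *
 * The VF's interrupt source is only unmasked again from adf_iov_send_resp()
 * once the message has been consumed.
 */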

static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_vf_info *vf_info;
	int i;

	/* Initialize the per-VF state; the VFs themselves are created later */
	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;

		mutex_init(&vf_info->pf2vf_lock);
		mutex_init(&vf_info->pfvf_mig_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     ADF_VF2PF_RATELIMIT_INTERVAL,
				     ADF_VF2PF_RATELIMIT_BURST);
	}

	/* Set Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, true);

	/* Enable VF to PF interrupts for all VFs, e.g. mask 0xffff for 16 VFs */
	adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);

	/*
	 * Due to the hardware design, when SR-IOV and the ring arbiter
	 * are enabled all the VFs supported in hardware must be enabled in
	 * order for all the hardware resources (i.e. bundles) to be usable.
	 * When SR-IOV is enabled, each of the VFs will own one bundle.
	 */
	return pci_enable_sriov(pdev, totalvfs);
}

/*
 * Build the minimal PF configuration needed to run with SR-IOV: no crypto
 * or compression service instances on the PF.
 */
static int adf_add_sriov_configuration(struct adf_accel_dev *accel_dev)
{
	unsigned long val = 0;
	int ret;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		return ret;

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		return ret;

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		return ret;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	return 0;
}
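
/*
 * With the usual values of ADF_KERNEL_SEC, ADF_NUM_CY and ADF_NUM_DC, the
 * configuration built above is equivalent to:
 *
 *	[KERNEL]
 *	NumberCyInstances = 0
 *	NumberDcInstances = 0
 *
 * i.e. no crypto or compression instances on the PF, leaving the bundles
 * free for the VFs.
 */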

static int adf_do_disable_sriov(struct adf_accel_dev *accel_dev)
{
	int ret;

	if (adf_dev_in_use(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Cannot disable SR-IOV, device in use\n");
		return -EBUSY;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev)) {
			dev_err(&GET_DEV(accel_dev),
				"Cannot disable SR-IOV, device in reset\n");
			return -EBUSY;
		}

		ret = adf_dev_down(accel_dev);
		if (ret)
			goto err_del_cfg;
	}

	adf_disable_sriov(accel_dev);

	ret = adf_dev_up(accel_dev, true);
	if (ret)
		goto err_del_cfg;

	return 0;

err_del_cfg:
	adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
	return ret;
}

static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!device_iommu_mapped(&GET_DEV(accel_dev)))
		dev_warn(&GET_DEV(accel_dev),
			 "IOMMU should be enabled for SR-IOV to work correctly\n");

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		ret = adf_dev_down(accel_dev);
		if (ret)
			return ret;
	}

	ret = adf_add_sriov_configuration(accel_dev);
	if (ret)
		goto err_del_cfg;

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs, sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	ret = -ENOMEM;
	if (!accel_dev->pf.vf_info)
		goto err_del_cfg;

	ret = adf_dev_up(accel_dev, false);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		goto err_free_vf_info;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		goto err_free_vf_info;

	/* Persist the SR-IOV state so it can be restored after a device reset */
	val = 1;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
					  &val, ADF_DEC);
	if (ret)
		goto err_free_vf_info;

	return totalvfs;

err_free_vf_info:
	adf_dev_down(accel_dev);
	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;
	return ret;
err_del_cfg:
	adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
	return ret;
}

/* Restore SR-IOV after a device reset, if it was enabled before the reset */
void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};

	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				    ADF_SRIOV_ENABLED, cfg))
		return;

	if (!accel_dev->pf.vf_info)
		return;

	if (adf_add_sriov_configuration(accel_dev))
		return;

	dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
	adf_enable_sriov(accel_dev);
}
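
/*
 * Example (illustrative sketch, not part of this file): a PF reset path is
 * expected to invoke adf_reenable_sriov() once the device is back up, along
 * the lines of:
 *
 *	adf_dev_down(accel_dev);
 *	ret = adf_dev_up(accel_dev, false);
 *	...
 *	adf_reenable_sriov(accel_dev);
 *
 * This works because adf_disable_sriov() keeps pf.vf_info allocated while
 * ADF_STATUS_RESTARTING is set, and adf_do_enable_sriov() records
 * ADF_SRIOV_ENABLED in the persistent ADF_GENERAL_SEC section.
 */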

/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev:  Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	/* Tell the VFs the PF is going away, then wait for them to quiesce */
	adf_pf2vf_notify_restarting(accel_dev);
	adf_pf2vf_wait_for_restarting_complete(accel_dev);
	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	adf_disable_all_vf2pf_interrupts(accel_dev);

	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, false);

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
		mutex_destroy(&vf->pf2vf_lock);
		mutex_destroy(&vf->pfvf_mig_lock);
	}

	/*
	 * Keep vf_info across a device reset so adf_reenable_sriov() can
	 * restore SR-IOV once the PF is back up.
	 */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
		kfree(accel_dev->pf.vf_info);
		accel_dev->pf.vf_info = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);

/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev:  Pointer to PCI device.
 * @numvfs: Number of virtual functions (VFs) to enable.
 *
 * Note that the @numvfs parameter is ignored and all VFs supported by the
 * device are enabled due to the design of the hardware.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	if (numvfs)
		return adf_do_enable_sriov(accel_dev);
	else
		return adf_do_disable_sriov(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
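
/*
 * Example (illustrative, based on how the QAT device drivers use this
 * helper): adf_sriov_configure() is plugged into struct pci_driver so that
 * the PCI core calls it when sriov_numvfs is written in sysfs:
 *
 *	static struct pci_driver adf_driver = {
 *		.id_table = adf_pci_tbl,
 *		.name = ADF_4XXX_DEVICE_NAME,
 *		.probe = adf_probe,
 *		.remove = adf_remove,
 *		.sriov_configure = adf_sriov_configure,
 *	};
 *
 *	# echo 16 > /sys/bus/pci/devices/0000:6b:00.0/sriov_numvfs
 */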

int __init adf_init_pf_wq(void)
{
	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

	return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}
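
/*
 * Example (illustrative sketch): the common module init/exit code is
 * expected to pair these helpers, roughly:
 *
 *	static int __init adf_register_ctl_device_driver(void)
 *	{
 *		...
 *		if (adf_init_pf_wq())
 *			goto err_pf_wq;
 *		...
 *	}
 *
 *	static void __exit adf_unregister_ctl_device_driver(void)
 *	{
 *		...
 *		adf_exit_pf_wq();
 *	}
 */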