// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on
 * pf->adev->dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}
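
/*
 * A minimal usage sketch (modeled on how the ice driver itself signals an
 * MTU change; the surrounding context here is hypothetical):
 *
 *	struct iidc_event *event;
 *
 *	event = kzalloc(sizeof(*event), GFP_KERNEL);
 *	if (event) {
 *		set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
 *		ice_send_event_to_aux(pf, event);
 *		kfree(event);
 *	}
 */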

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
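
/*
 * Sketch of how an RDMA aux driver might register a qset and keep the
 * returned TEID for later teardown (the qs_handle, tc and vport_id values
 * are hypothetical; compare the irdma driver's qset registration):
 *
 *	struct iidc_rdma_qset_params qset = {};
 *	int ret;
 *
 *	qset.qs_handle = qs_handle;
 *	qset.tc = traffic_class;
 *	qset.vport_id = vsi_idx;
 *	ret = ice_add_rdma_qset(pf, &qset);
 *	if (ret)
 *		return ret;
 *
 * On success, qset.teid holds the TEID of the new scheduler node.
 */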

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
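
/*
 * Teardown counterpart to the registration sketch above: the caller passes
 * back the same qs_handle/tc/vport_id along with the TEID saved from
 * ice_add_rdma_qset() (values again hypothetical):
 *
 *	qset.teid = saved_teid;
 *	ret = ice_del_rdma_qset(pf, &qset);
 */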

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
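
/*
 * Sketch: an aux driver that detects an unrecoverable error can ask the PF
 * to schedule a reset, e.g.:
 *
 *	ice_rdma_request_reset(pf, IIDC_PFR);
 */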

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
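
/*
 * Sketch: an aux driver typically enables RDMA filtering on its VSI when it
 * starts up and disables it on teardown (vsi_num is hypothetical):
 *
 *	ret = ice_rdma_update_vsi_filter(pf, vsi_num, true);
 *	...
 *	ice_rdma_update_vsi_filter(pf, vsi_num, false);
 */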

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
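
/*
 * Sketch of a consumer snapshotting the QoS state (field names as declared
 * in struct iidc_qos_params):
 *
 *	struct iidc_qos_params qos = {};
 *	unsigned int i;
 *
 *	ice_get_qos_params(pf, &qos);
 *	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
 *		pr_debug("UP %u maps to TC %u\n", i, qos.up2tc[i]);
 */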

/**
 * ice_alloc_rdma_qvector - allocate an MSI-X vector for the RDMA driver
 * @pf: board private structure
 * @entry: MSI-X entry to be filled in with the allocated interrupt
 */
int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
	struct msi_map map = ice_alloc_irq(pf, true);

	if (map.index < 0)
		return -ENOMEM;

	entry->entry = map.index;
	entry->vector = map.virq;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
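
/*
 * Sketch of the alloc/free pairing (the msix_entry storage belongs to the
 * caller):
 *
 *	struct msix_entry entry;
 *
 *	if (ice_alloc_rdma_qvector(pf, &entry))
 *		return -ENOMEM;
 *	...
 *	ice_free_rdma_qvector(pf, &entry);
 */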

/**
 * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
 * @pf: board private structure
 * @entry: MSI-X entry to be freed
 */
void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
	struct msi_map map;

	map.index = entry->entry;
	map.virq = entry->vector;
	ice_free_irq(pf, map);
}
EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}
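
/*
 * The auxiliary bus matches devices to drivers by "<module>.<name>", so a
 * driver binding to the device plugged above would carry an id table along
 * these lines (sketch, modeled on the irdma driver):
 *
 *	static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
 *		{ .name = "ice.iwarp" },
 *		{ .name = "ice.roce" },
 *		{},
 *	};
 */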

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	if (!ice_is_rdma_ena(pf)) {
		dev_warn(dev, "RDMA is not supported on this device\n");
		return 0;
	}

	ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret) {
		dev_err(dev, "Failed to allocate device ID for AUX driver\n");
		return -ENOMEM;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		goto err_plug_aux_dev;
	return 0;

err_plug_aux_dev:
	pf->adev = NULL;
	xa_erase(&ice_aux_id, pf->aux_idx);
	return ret;
}

/**
 * ice_deinit_rdma - deinitialize RDMA on PF
 * @pf: ptr to ice_pf
 */
void ice_deinit_rdma(struct ice_pf *pf)
{
	if (!ice_is_rdma_ena(pf))
		return;

	ice_unplug_aux_dev(pf);
	xa_erase(&ice_aux_id, pf->aux_idx);
}