// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_deinitialize_vf_entry(vf);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable the VF's MSI-X and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + vf->num_msix - 1;
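	/* Hand each of the VF's vectors back to the PF by marking the
	 * vector-to-function mapping as PF-owned (IS_PF set, with this
	 * PF's number).
	 */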
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

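	/* Claim the ICE_VF_DIS bit; if another flow is already disabling
	 * VFs, busy-wait until it releases the bit.
	 */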
	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		ice_dis_vf_qs(vf);
		ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		mutex_unlock(&vf->cfg_lock);
	}

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.port_info = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;

	return vsi;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some registers need to be indexed/configured using device-global values,
 * while others take 0-based values that are relative to the PF.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + vf->num_msix) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

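	/* VPINT_ALLOC and VPINT_ALLOC_PCI describe the VF's vector range to
	 * the device, so both take device-based (global) vector indexes.
	 */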
	reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
	      FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
	      VPINT_ALLOC_VALID_M;
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
	      FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
	      VPINT_ALLOC_PCI_VALID_M;
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map each interrupt vector to the VF's function */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
		      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
		      FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	if (!vf || !q_vector)
		return;

	/* always add one to account for the OICR being the first MSIX */
	q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
	q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from the common pool. If we allocate fewer
 * VFs, we get more vectors and can enable more queues per VF. Note that this
 * does not grab any vectors from the SW pool already allocated. Also note that
 * all vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
 * used by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

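	/* For example, with 256 vectors left in the tracker and 16 VFs
	 * requested, msix_avail_per_vf is 16, which lands in the small
	 * tier above: each VF gets ICE_NUM_VF_MSIX_SMALL vectors.
	 */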
	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		return -ENOMEM;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		retval = ice_eswitch_attach_vf(pf, vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d\n",
				vf->vf_id, retval);
			ice_vf_vsi_release(vf);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the VF to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that the VF immediately sees
	 * that the device is resetting, even if hardware hasn't yet gotten
	 * around to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

544
545 /**
546 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
547 * @vf: the vf to configure
548 */
ice_sriov_clear_mbx_register(struct ice_vf * vf)549 static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
550 {
551 struct ice_pf *pf = vf->pf;
552
553 wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
554 wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
555 }
556
557 /**
558 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
559 * @vf: pointer to VF structure
560 * @is_vflr: true if reset occurred due to VFLR
561 *
562 * Trigger and cleanup after a VF reset for a SR-IOV VF.
563 */
ice_sriov_trigger_reset_register(struct ice_vf * vf,bool is_vflr)564 static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
565 {
566 struct ice_pf *pf = vf->pf;
567 u32 reg, reg_idx, bit_idx;
568 unsigned int vf_abs_id, i;
569 struct device *dev;
570 struct ice_hw *hw;
571
572 dev = ice_pf_to_dev(pf);
573 hw = &pf->hw;
574 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
575
576 /* In the case of a VFLR, HW has already reset the VF and we just need
577 * to clean up. Otherwise we must first trigger the reset using the
578 * VFRTRIG register.
579 */
580 if (!is_vflr) {
581 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
582 reg |= VPGEN_VFRTRIG_VFSWR_M;
583 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
584 }
585
586 /* clear the VFLR bit in GLGEN_VFLRSTAT */
587 reg_idx = (vf_abs_id) / 32;
588 bit_idx = (vf_abs_id) % 32;
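	/* GLGEN_VFLRSTAT is laid out as 32-bit registers with one bit per
	 * VF; e.g. absolute VF ID 37 maps to register index 1, bit 5.
	 */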
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct pci_dev *pdev = pf->pdev;
	struct ice_vfs *vfs = &pf->vfs;
	struct pci_dev *vfdev = NULL;
	struct ice_vf *vf;
	u16 vf_pdev_id;
	int err, pos;

	lockdep_assert_held(&vfs->table_lock);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);

	for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

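		/* Find this VF's pci_dev: pci_get_device() walks devices
		 * matching the VF device ID, so skip any device that
		 * belongs to a different PF.
		 */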
		do {
			vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
		} while (vfdev && vfdev->physfn != pdev);
		vf->vfdev = vfdev;
		vf->vf_sw_id = pf->first_sw;

		pci_dev_get(vfdev);

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	/* Decrement of refcount done by pci_get_device() inside the loop does
	 * not touch the last iteration's vfdev, so it has to be done manually
	 * to balance pci_dev_get() added within the loop.
	 */
	pci_dev_put(vfdev);

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!num_vfs) {
		ice_free_vfs(pf);
		return 0;
	}

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs
 * @pdev: pointer to pci_dev struct
 *
 * The function is called via sysfs ops
 */
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	return pf->virt_irq_tracker.num_entries;
}

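/**
 * ice_sriov_remap_vectors - remap MSI-X vectors of inactive VFs
 * @pf: pointer to the PF structure
 * @restricted_id: ID of the VF whose vectors must not be remapped
 *
 * Free and reallocate the MSI-X vectors of every inactive VF except
 * @restricted_id, so that the vector space stays densely packed.
 */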
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
{
	u16 vf_ids[ICE_MAX_SRIOV_VFS];
	struct ice_vf *tmp_vf;
	int to_remap = 0, bkt;

	/* To improve IRQ usage, try to remap the IRQs of VFs that aren't
	 * running yet
	 */
	ice_for_each_vf(pf, bkt, tmp_vf) {
		/* skip the VF that is changing its number of MSI-X */
		if (restricted_id == tmp_vf->vf_id ||
		    test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
			continue;

		ice_dis_vf_mappings(tmp_vf);
		ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
				   tmp_vf->num_msix);

		vf_ids[to_remap] = tmp_vf->vf_id;
		to_remap += 1;
	}

	for (int i = 0; i < to_remap; i++) {
		tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
		if (!tmp_vf)
			continue;

		tmp_vf->first_vector_idx =
			ice_virt_get_irqs(pf, tmp_vf->num_msix);
		/* there is no need to rebuild the VSI as we are only changing
		 * the vector indexes, not the number of MSI-X vectors or
		 * queues
		 */
		ice_ena_vf_mappings(tmp_vf);
		ice_put_vf(tmp_vf);
	}
}

/**
 * ice_sriov_set_msix_vec_count - set a new MSI-X vector count on a VF
 * @vf_dev: pointer to pci_dev struct of VF device
 * @msix_vec_count: new value for MSI-X amount on this VF
 *
 * Set requested MSI-X, queues and registers for @vf_dev.
 *
 * First do some sanity checks, such as whether any VFs exist and whether the
 * new value is valid. Then disable the old mapping (MSI-X and queue
 * registers), change the MSI-X and queue counts, rebuild the VSI and enable
 * the new mapping.
 *
 * If possible (no driver bound to the VF), also try to remap the other VFs so
 * that the IRQ register usage stays linear.
 */
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
	struct pci_dev *pdev = pci_physfn(vf_dev);
	struct ice_pf *pf = pci_get_drvdata(pdev);
	u16 prev_msix, prev_queues, queues;
	bool needs_rebuild = false;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int id;

	if (!ice_get_num_vfs(pf))
		return -ENOENT;

	if (!msix_vec_count)
		return 0;

	queues = msix_vec_count;
	/* add 1 MSI-X for OICR */
	msix_vec_count += 1;

	if (queues > min(ice_get_avail_txq_count(pf),
			 ice_get_avail_rxq_count(pf)))
		return -EINVAL;

	if (msix_vec_count < ICE_MIN_INTR_PER_VF)
		return -EINVAL;

	/* Transition of PCI VF function number to function_id */
	for (id = 0; id < pci_num_vf(pdev); id++) {
		if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
			break;
	}

	if (id == pci_num_vf(pdev))
		return -ENOENT;

	vf = ice_get_vf_by_id(pf, id);

	if (!vf)
		return -ENOENT;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ice_put_vf(vf);
		return -ENOENT;
	}

	prev_msix = vf->num_msix;
	prev_queues = vf->num_vf_qs;

	ice_dis_vf_mappings(vf);
	ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);

	/* Remap all VFs besides the one now being configured */
	ice_sriov_remap_vectors(pf, vf->vf_id);

	vf->num_msix = msix_vec_count;
	vf->num_vf_qs = queues;
	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		goto unroll;

	vsi->req_txq = queues;
	vsi->req_rxq = queues;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
		/* Try to rebuild with previous values */
		needs_rebuild = true;
		goto unroll;
	}

	dev_info(ice_pf_to_dev(pf),
		 "Changing VF %d resources to %d vectors and %d queues\n",
		 vf->vf_id, vf->num_msix, vf->num_vf_qs);

	ice_ena_vf_mappings(vf);
	ice_put_vf(vf);

	return 0;

unroll:
	dev_info(ice_pf_to_dev(pf),
		 "Can't set %d vectors on VF %d, falling back to %d\n",
		 vf->num_msix, vf->vf_id, prev_msix);

	vf->num_msix = prev_msix;
	vf->num_vf_qs = prev_queues;

	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0) {
		ice_put_vf(vf);
		return -EINVAL;
	}

	if (needs_rebuild) {
		vsi->req_txq = prev_queues;
		vsi->req_rxq = prev_queues;

		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	}

	ice_ena_vf_mappings(vf);
	ice_put_vf(vf);

	return -EINVAL;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg - return VF configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * __ice_set_vf_mac - program VF MAC address
 * @pf: PF to configure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 * Return: zero on success or an error code on failure
 */
int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
{
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);
	if (is_multicast_ether_addr(mac)) {
		dev_err(dev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			 vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			 mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac - .ndo_set_vf_mac handler
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 * Return: zero on success or an error code on failure
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
}

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	/* check switchdev mode before taking a VF reference so the early
	 * return does not leak it
	 */
	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of the
 * total min_tx_rate based on the current link speed and all other VFs'
 * configured min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;
			ice_print_vf_tx_mdd_event(vf);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pf: pointer to the PF structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	u32 bkt;

	ice_for_each_vf(pf, bkt, vf)
		pci_restore_msi_state(vf->vfdev);
}