1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8 * be included exactly once across the whole kernel with
9 * CREATE_TRACE_POINTS defined
10 */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static int iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23
24 static const char iavf_copyright[] =
25 "Copyright (c) 2013 - 2018 Intel Corporation.";
26
27 /* iavf_pci_tbl - PCI Device ID Table
28 *
29 * Wildcard entries (PCI_ANY_ID) should come last
30 * Last entry must be all 0s
31 *
32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33 * Class, Class Mask, private data (not used) }
34 */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40 /* required last entry */
41 {0, }
42 };
43
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50
51 static const struct net_device_ops iavf_netdev_ops;
52 struct workqueue_struct *iavf_wq;
53
54 /**
55 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
56 * @hw: pointer to the HW structure
57 * @mem: ptr to mem struct to fill out
58 * @size: size of memory requested
59 * @alignment: what to align the allocation to
60 **/
61 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
62 struct iavf_dma_mem *mem,
63 u64 size, u32 alignment)
64 {
65 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
66
67 if (!mem)
68 return IAVF_ERR_PARAM;
69
70 mem->size = ALIGN(size, alignment);
71 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
72 (dma_addr_t *)&mem->pa, GFP_KERNEL);
73 if (mem->va)
74 return 0;
75 else
76 return IAVF_ERR_NO_MEMORY;
77 }
78
79 /**
80 * iavf_free_dma_mem_d - OS specific memory free for shared code
81 * @hw: pointer to the HW structure
82 * @mem: ptr to mem struct to free
83 **/
84 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
85 struct iavf_dma_mem *mem)
86 {
87 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
88
89 if (!mem || !mem->va)
90 return IAVF_ERR_PARAM;
91 dma_free_coherent(&adapter->pdev->dev, mem->size,
92 mem->va, (dma_addr_t)mem->pa);
93 return 0;
94 }
95
96 /**
97 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
98 * @hw: pointer to the HW structure
99 * @mem: ptr to mem struct to fill out
100 * @size: size of memory requested
101 **/
102 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
103 struct iavf_virt_mem *mem, u32 size)
104 {
105 if (!mem)
106 return IAVF_ERR_PARAM;
107
108 mem->size = size;
109 mem->va = kzalloc(size, GFP_KERNEL);
110
111 if (mem->va)
112 return 0;
113 else
114 return IAVF_ERR_NO_MEMORY;
115 }
116
117 /**
118 * iavf_free_virt_mem_d - OS specific memory free for shared code
119 * @hw: pointer to the HW structure
120 * @mem: ptr to mem struct to free
121 **/
122 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
123 struct iavf_virt_mem *mem)
124 {
125 if (!mem)
126 return IAVF_ERR_PARAM;
127
128 /* it's ok to kfree a NULL pointer */
129 kfree(mem->va);
130
131 return 0;
132 }
133
134 /**
135 * iavf_schedule_reset - Set the flags and schedule a reset event
136 * @adapter: board private structure
137 **/
138 void iavf_schedule_reset(struct iavf_adapter *adapter)
139 {
140 if (!(adapter->flags &
141 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
142 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
143 queue_work(iavf_wq, &adapter->reset_task);
144 }
145 }
146
147 /**
148 * iavf_tx_timeout - Respond to a Tx Hang
149 * @netdev: network interface device structure
150 * @txqueue: queue number that is timing out
151 **/
152 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
153 {
154 struct iavf_adapter *adapter = netdev_priv(netdev);
155
156 adapter->tx_timeout_count++;
157 iavf_schedule_reset(adapter);
158 }
159
160 /**
161 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
162 * @adapter: board private structure
163 **/
164 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
165 {
166 struct iavf_hw *hw = &adapter->hw;
167
168 if (!adapter->msix_entries)
169 return;
170
171 wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
172
173 iavf_flush(hw);
174
175 synchronize_irq(adapter->msix_entries[0].vector);
176 }
177
178 /**
179 * iavf_misc_irq_enable - Enable default interrupt generation settings
180 * @adapter: board private structure
181 **/
182 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
183 {
184 struct iavf_hw *hw = &adapter->hw;
185
186 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
187 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
188 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
189
190 iavf_flush(hw);
191 }
192
193 /**
194 * iavf_irq_disable - Mask off interrupt generation on the NIC
195 * @adapter: board private structure
196 **/
197 static void iavf_irq_disable(struct iavf_adapter *adapter)
198 {
199 int i;
200 struct iavf_hw *hw = &adapter->hw;
201
202 if (!adapter->msix_entries)
203 return;
204
205 for (i = 1; i < adapter->num_msix_vectors; i++) {
206 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
207 synchronize_irq(adapter->msix_entries[i].vector);
208 }
209 iavf_flush(hw);
210 }
211
212 /**
213 * iavf_irq_enable_queues - Enable interrupt for specified queues
214 * @adapter: board private structure
215 * @mask: bitmap of queues to enable
216 **/
217 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
218 {
219 struct iavf_hw *hw = &adapter->hw;
220 int i;
221
222 for (i = 1; i < adapter->num_msix_vectors; i++) {
223 if (mask & BIT(i - 1)) {
224 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
225 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
226 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
227 }
228 }
229 }
230
231 /**
232 * iavf_irq_enable - Enable default interrupt generation settings
233 * @adapter: board private structure
234 * @flush: boolean value whether to run rd32()
235 **/
236 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
237 {
238 struct iavf_hw *hw = &adapter->hw;
239
240 iavf_misc_irq_enable(adapter);
241 iavf_irq_enable_queues(adapter, ~0);
242
243 if (flush)
244 iavf_flush(hw);
245 }
246
247 /**
248 * iavf_msix_aq - Interrupt handler for vector 0
249 * @irq: interrupt number
250 * @data: pointer to netdev
251 **/
252 static irqreturn_t iavf_msix_aq(int irq, void *data)
253 {
254 struct net_device *netdev = data;
255 struct iavf_adapter *adapter = netdev_priv(netdev);
256 struct iavf_hw *hw = &adapter->hw;
257
258 /* handle non-queue interrupts, these reads clear the registers */
259 rd32(hw, IAVF_VFINT_ICR01);
260 rd32(hw, IAVF_VFINT_ICR0_ENA1);
261
262 /* schedule work on the private workqueue */
263 queue_work(iavf_wq, &adapter->adminq_task);
264
265 return IRQ_HANDLED;
266 }
267
268 /**
269 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
270 * @irq: interrupt number
271 * @data: pointer to a q_vector
272 **/
273 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
274 {
275 struct iavf_q_vector *q_vector = data;
276
277 if (!q_vector->tx.ring && !q_vector->rx.ring)
278 return IRQ_HANDLED;
279
280 napi_schedule_irqoff(&q_vector->napi);
281
282 return IRQ_HANDLED;
283 }
284
285 /**
286 * iavf_map_vector_to_rxq - associate irqs with rx queues
287 * @adapter: board private structure
288 * @v_idx: interrupt number
289 * @r_idx: queue number
290 **/
291 static void
292 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
293 {
294 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
295 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
296 struct iavf_hw *hw = &adapter->hw;
297
298 rx_ring->q_vector = q_vector;
299 rx_ring->next = q_vector->rx.ring;
300 rx_ring->vsi = &adapter->vsi;
301 q_vector->rx.ring = rx_ring;
302 q_vector->rx.count++;
303 q_vector->rx.next_update = jiffies + 1;
304 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
305 q_vector->ring_mask |= BIT(r_idx);
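/* The ITR registers are programmed in 2 usec units, so the stored
 * microsecond value is shifted right by one before it is written.
 */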
306 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
307 q_vector->rx.current_itr >> 1);
308 q_vector->rx.current_itr = q_vector->rx.target_itr;
309 }
310
311 /**
312 * iavf_map_vector_to_txq - associate irqs with tx queues
313 * @adapter: board private structure
314 * @v_idx: interrupt number
315 * @t_idx: queue number
316 **/
317 static void
318 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
319 {
320 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
321 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
322 struct iavf_hw *hw = &adapter->hw;
323
324 tx_ring->q_vector = q_vector;
325 tx_ring->next = q_vector->tx.ring;
326 tx_ring->vsi = &adapter->vsi;
327 q_vector->tx.ring = tx_ring;
328 q_vector->tx.count++;
329 q_vector->tx.next_update = jiffies + 1;
330 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
331 q_vector->num_ringpairs++;
332 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
333 q_vector->tx.target_itr >> 1);
334 q_vector->tx.current_itr = q_vector->tx.target_itr;
335 }
336
337 /**
338 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
339 * @adapter: board private structure to initialize
340 *
341 * This function maps descriptor rings to the queue-specific vectors
342 * we were allotted through the MSI-X enabling code. Ideally, we'd have
343 * one vector per ring/queue, but on a constrained vector budget, we
344 * group the rings as "efficiently" as possible. You would add new
345 * mapping configurations in here.
346 **/
347 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
348 {
349 int rings_remaining = adapter->num_active_queues;
350 int ridx = 0, vidx = 0;
351 int q_vectors;
352
353 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
354
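/* Example of the round-robin below: with eight queue pairs and four
 * q_vectors, vector 0 services queues 0 and 4, vector 1 services
 * queues 1 and 5, and so on.
 */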
355 for (; ridx < rings_remaining; ridx++) {
356 iavf_map_vector_to_rxq(adapter, vidx, ridx);
357 iavf_map_vector_to_txq(adapter, vidx, ridx);
358
359 /* In the case where we have more queues than vectors, continue
360 * round-robin on vectors until all queues are mapped.
361 */
362 if (++vidx >= q_vectors)
363 vidx = 0;
364 }
365
366 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
367 }
368
369 /**
370 * iavf_irq_affinity_notify - Callback for affinity changes
371 * @notify: context as to what irq was changed
372 * @mask: the new affinity mask
373 *
374 * This is a callback function used by the irq_set_affinity_notifier function
375 * so that we may register to receive changes to the irq affinity masks.
376 **/
377 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
378 const cpumask_t *mask)
379 {
380 struct iavf_q_vector *q_vector =
381 container_of(notify, struct iavf_q_vector, affinity_notify);
382
383 cpumask_copy(&q_vector->affinity_mask, mask);
384 }
385
386 /**
387 * iavf_irq_affinity_release - Callback for affinity notifier release
388 * @ref: internal core kernel usage
389 *
390 * This is a callback function used by the irq_set_affinity_notifier function
391 * to inform the current notification subscriber that they will no longer
392 * receive notifications.
393 **/
394 static void iavf_irq_affinity_release(struct kref *ref) {}
395
396 /**
397 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
398 * @adapter: board private structure
399 * @basename: device basename
400 *
401 * Allocates MSI-X vectors for tx and rx handling, and requests
402 * interrupts from the kernel.
403 **/
404 static int
405 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
406 {
407 unsigned int vector, q_vectors;
408 unsigned int rx_int_idx = 0, tx_int_idx = 0;
409 int irq_num, err;
410 int cpu;
411
412 iavf_irq_disable(adapter);
413 /* Decrement for the non-queue (admin queue) vector */
414 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
415
416 for (vector = 0; vector < q_vectors; vector++) {
417 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
418
419 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
420
421 if (q_vector->tx.ring && q_vector->rx.ring) {
422 snprintf(q_vector->name, sizeof(q_vector->name),
423 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
424 tx_int_idx++;
425 } else if (q_vector->rx.ring) {
426 snprintf(q_vector->name, sizeof(q_vector->name),
427 "iavf-%s-rx-%d", basename, rx_int_idx++);
428 } else if (q_vector->tx.ring) {
429 snprintf(q_vector->name, sizeof(q_vector->name),
430 "iavf-%s-tx-%d", basename, tx_int_idx++);
431 } else {
432 /* skip this unused q_vector */
433 continue;
434 }
435 err = request_irq(irq_num,
436 iavf_msix_clean_rings,
437 0,
438 q_vector->name,
439 q_vector);
440 if (err) {
441 dev_info(&adapter->pdev->dev,
442 "Request_irq failed, error: %d\n", err);
443 goto free_queue_irqs;
444 }
445 /* register for affinity change notifications */
446 q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
447 q_vector->affinity_notify.release =
448 iavf_irq_affinity_release;
449 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
450 /* Spread the IRQ affinity hints across online CPUs. Note that
451 * get_cpu_mask returns a mask with a permanent lifetime so
452 * it's safe to use as a hint for irq_set_affinity_hint.
453 */
454 cpu = cpumask_local_spread(q_vector->v_idx, -1);
455 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
456 }
457
458 return 0;
459
460 free_queue_irqs:
461 while (vector) {
462 vector--;
463 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
464 irq_set_affinity_notifier(irq_num, NULL);
465 irq_set_affinity_hint(irq_num, NULL);
466 free_irq(irq_num, &adapter->q_vectors[vector]);
467 }
468 return err;
469 }
470
471 /**
472 * iavf_request_misc_irq - Initialize MSI-X interrupts
473 * @adapter: board private structure
474 *
475 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
476 * vector is only for the admin queue, and stays active even when the netdev
477 * is closed.
478 **/
479 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
480 {
481 struct net_device *netdev = adapter->netdev;
482 int err;
483
484 snprintf(adapter->misc_vector_name,
485 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
486 dev_name(&adapter->pdev->dev));
487 err = request_irq(adapter->msix_entries[0].vector,
488 &iavf_msix_aq, 0,
489 adapter->misc_vector_name, netdev);
490 if (err) {
491 dev_err(&adapter->pdev->dev,
492 "request_irq for %s failed: %d\n",
493 adapter->misc_vector_name, err);
494 free_irq(adapter->msix_entries[0].vector, netdev);
495 }
496 return err;
497 }
498
499 /**
500 * iavf_free_traffic_irqs - Free MSI-X interrupts
501 * @adapter: board private structure
502 *
503 * Frees all MSI-X vectors other than 0.
504 **/
505 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
506 {
507 int vector, irq_num, q_vectors;
508
509 if (!adapter->msix_entries)
510 return;
511
512 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
513
514 for (vector = 0; vector < q_vectors; vector++) {
515 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
516 irq_set_affinity_notifier(irq_num, NULL);
517 irq_set_affinity_hint(irq_num, NULL);
518 free_irq(irq_num, &adapter->q_vectors[vector]);
519 }
520 }
521
522 /**
523 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
524 * @adapter: board private structure
525 *
526 * Frees MSI-X vector 0.
527 **/
528 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
529 {
530 struct net_device *netdev = adapter->netdev;
531
532 if (!adapter->msix_entries)
533 return;
534
535 free_irq(adapter->msix_entries[0].vector, netdev);
536 }
537
538 /**
539 * iavf_configure_tx - Configure Transmit Unit after Reset
540 * @adapter: board private structure
541 *
542 * Configure the Tx unit of the MAC after a reset.
543 **/
544 static void iavf_configure_tx(struct iavf_adapter *adapter)
545 {
546 struct iavf_hw *hw = &adapter->hw;
547 int i;
548
549 for (i = 0; i < adapter->num_active_queues; i++)
550 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
551 }
552
553 /**
554 * iavf_configure_rx - Configure Receive Unit after Reset
555 * @adapter: board private structure
556 *
557 * Configure the Rx unit of the MAC after a reset.
558 **/
559 static void iavf_configure_rx(struct iavf_adapter *adapter)
560 {
561 unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
562 struct iavf_hw *hw = &adapter->hw;
563 int i;
564
565 /* Legacy Rx will always default to a 2048 buffer size. */
566 #if (PAGE_SIZE < 8192)
567 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
568 struct net_device *netdev = adapter->netdev;
569
570 /* For jumbo frames on systems with 4K pages we have to use
571 * an order 1 page, so we might as well increase the size
572 * of our Rx buffer to make better use of the available space
573 */
574 rx_buf_len = IAVF_RXBUFFER_3072;
575
576 /* We use a 1536 buffer size for configurations with
577 * standard Ethernet mtu. On x86 this gives us enough room
578 * for shared info and 192 bytes of padding.
579 */
580 if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
581 (netdev->mtu <= ETH_DATA_LEN))
582 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
583 }
584 #endif
585
586 for (i = 0; i < adapter->num_active_queues; i++) {
587 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
588 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
589
590 if (adapter->flags & IAVF_FLAG_LEGACY_RX)
591 clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
592 else
593 set_ring_build_skb_enabled(&adapter->rx_rings[i]);
594 }
595 }
596
597 /**
598 * iavf_find_vlan - Search filter list for specific vlan filter
599 * @adapter: board private structure
600 * @vlan: vlan tag
601 *
602 * Returns ptr to the filter object or NULL. Must be called while holding the
603 * mac_vlan_list_lock.
604 **/
605 static struct
606 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
607 {
608 struct iavf_vlan_filter *f;
609
610 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
611 if (vlan == f->vlan)
612 return f;
613 }
614 return NULL;
615 }
616
617 /**
618 * iavf_add_vlan - Add a vlan filter to the list
619 * @adapter: board private structure
620 * @vlan: VLAN tag
621 *
622 * Returns ptr to the filter object or NULL when no memory available.
623 **/
624 static struct
625 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
626 {
627 struct iavf_vlan_filter *f = NULL;
628
629 spin_lock_bh(&adapter->mac_vlan_list_lock);
630
631 f = iavf_find_vlan(adapter, vlan);
632 if (!f) {
633 f = kzalloc(sizeof(*f), GFP_ATOMIC);
634 if (!f)
635 goto clearout;
636
637 f->vlan = vlan;
638
639 list_add_tail(&f->list, &adapter->vlan_filter_list);
640 f->add = true;
641 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
642 }
643
644 clearout:
645 spin_unlock_bh(&adapter->mac_vlan_list_lock);
646 return f;
647 }
648
649 /**
650 * iavf_del_vlan - Remove a vlan filter from the list
651 * @adapter: board private structure
652 * @vlan: VLAN tag
653 **/
654 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
655 {
656 struct iavf_vlan_filter *f;
657
658 spin_lock_bh(&adapter->mac_vlan_list_lock);
659
660 f = iavf_find_vlan(adapter, vlan);
661 if (f) {
662 f->remove = true;
663 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
664 }
665
666 spin_unlock_bh(&adapter->mac_vlan_list_lock);
667 }
668
669 /**
670 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
671 * @netdev: network device struct
672 * @proto: unused protocol data
673 * @vid: VLAN tag
674 **/
675 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
676 __always_unused __be16 proto, u16 vid)
677 {
678 struct iavf_adapter *adapter = netdev_priv(netdev);
679
680 if (!VLAN_ALLOWED(adapter))
681 return -EIO;
682 if (iavf_add_vlan(adapter, vid) == NULL)
683 return -ENOMEM;
684 return 0;
685 }
686
687 /**
688 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
689 * @netdev: network device struct
690 * @proto: unused protocol data
691 * @vid: VLAN tag
692 **/
693 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
694 __always_unused __be16 proto, u16 vid)
695 {
696 struct iavf_adapter *adapter = netdev_priv(netdev);
697
698 if (VLAN_ALLOWED(adapter)) {
699 iavf_del_vlan(adapter, vid);
700 return 0;
701 }
702 return -EIO;
703 }
704
705 /**
706 * iavf_find_filter - Search filter list for specific mac filter
707 * @adapter: board private structure
708 * @macaddr: the MAC address
709 *
710 * Returns ptr to the filter object or NULL. Must be called while holding the
711 * mac_vlan_list_lock.
712 **/
713 static struct
714 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
715 const u8 *macaddr)
716 {
717 struct iavf_mac_filter *f;
718
719 if (!macaddr)
720 return NULL;
721
722 list_for_each_entry(f, &adapter->mac_filter_list, list) {
723 if (ether_addr_equal(macaddr, f->macaddr))
724 return f;
725 }
726 return NULL;
727 }
728
729 /**
730 * iavf_add_filter - Add a mac filter to the filter list
731 * @adapter: board private structure
732 * @macaddr: the MAC address
733 *
734 * Returns ptr to the filter object or NULL when no memory available.
735 **/
736 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
737 const u8 *macaddr)
738 {
739 struct iavf_mac_filter *f;
740
741 if (!macaddr)
742 return NULL;
743
744 f = iavf_find_filter(adapter, macaddr);
745 if (!f) {
746 f = kzalloc(sizeof(*f), GFP_ATOMIC);
747 if (!f)
748 return f;
749
750 ether_addr_copy(f->macaddr, macaddr);
751
752 list_add_tail(&f->list, &adapter->mac_filter_list);
753 f->add = true;
754 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
755 } else {
756 f->remove = false;
757 }
758
759 return f;
760 }
761
762 /**
763 * iavf_set_mac - NDO callback to set port mac address
764 * @netdev: network interface device structure
765 * @p: pointer to an address structure
766 *
767 * Returns 0 on success, negative on failure
768 **/
769 static int iavf_set_mac(struct net_device *netdev, void *p)
770 {
771 struct iavf_adapter *adapter = netdev_priv(netdev);
772 struct iavf_hw *hw = &adapter->hw;
773 struct iavf_mac_filter *f;
774 struct sockaddr *addr = p;
775
776 if (!is_valid_ether_addr(addr->sa_data))
777 return -EADDRNOTAVAIL;
778
779 if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
780 return 0;
781
782 spin_lock_bh(&adapter->mac_vlan_list_lock);
783
784 f = iavf_find_filter(adapter, hw->mac.addr);
785 if (f) {
786 f->remove = true;
787 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
788 }
789
790 f = iavf_add_filter(adapter, addr->sa_data);
791
792 spin_unlock_bh(&adapter->mac_vlan_list_lock);
793
794 if (f) {
795 ether_addr_copy(hw->mac.addr, addr->sa_data);
796 }
797
798 return (f == NULL) ? -ENOMEM : 0;
799 }
800
801 /**
802 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
803 * @netdev: the netdevice
804 * @addr: address to add
805 *
806 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
807 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
808 */
809 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
810 {
811 struct iavf_adapter *adapter = netdev_priv(netdev);
812
813 if (iavf_add_filter(adapter, addr))
814 return 0;
815 else
816 return -ENOMEM;
817 }
818
819 /**
820 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
821 * @netdev: the netdevice
822 * @addr: address to remove
823 *
824 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
825 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
826 */
827 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
828 {
829 struct iavf_adapter *adapter = netdev_priv(netdev);
830 struct iavf_mac_filter *f;
831
832 /* Under some circumstances, we might receive a request to delete
833 * our own device address from our uc list. Because we store the
834 * device address in the VSI's MAC/VLAN filter list, we need to ignore
835 * such requests and not delete our device address from this list.
836 */
837 if (ether_addr_equal(addr, netdev->dev_addr))
838 return 0;
839
840 f = iavf_find_filter(adapter, addr);
841 if (f) {
842 f->remove = true;
843 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
844 }
845 return 0;
846 }
847
848 /**
849 * iavf_set_rx_mode - NDO callback to set the netdev filters
850 * @netdev: network interface device structure
851 **/
852 static void iavf_set_rx_mode(struct net_device *netdev)
853 {
854 struct iavf_adapter *adapter = netdev_priv(netdev);
855
856 spin_lock_bh(&adapter->mac_vlan_list_lock);
857 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
858 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
859 spin_unlock_bh(&adapter->mac_vlan_list_lock);
860
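/* Only the aq_required flags are set below; the corresponding virtchnl
 * promiscuous/allmulti requests are sent to the PF later by the
 * watchdog task via iavf_process_aq_command().
 */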
861 if (netdev->flags & IFF_PROMISC &&
862 !(adapter->flags & IAVF_FLAG_PROMISC_ON))
863 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
864 else if (!(netdev->flags & IFF_PROMISC) &&
865 adapter->flags & IAVF_FLAG_PROMISC_ON)
866 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
867
868 if (netdev->flags & IFF_ALLMULTI &&
869 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
870 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
871 else if (!(netdev->flags & IFF_ALLMULTI) &&
872 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
873 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
874 }
875
876 /**
877 * iavf_napi_enable_all - enable NAPI on all queue vectors
878 * @adapter: board private structure
879 **/
880 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
881 {
882 int q_idx;
883 struct iavf_q_vector *q_vector;
884 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
885
886 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
887 struct napi_struct *napi;
888
889 q_vector = &adapter->q_vectors[q_idx];
890 napi = &q_vector->napi;
891 napi_enable(napi);
892 }
893 }
894
895 /**
896 * iavf_napi_disable_all - disable NAPI on all queue vectors
897 * @adapter: board private structure
898 **/
899 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
900 {
901 int q_idx;
902 struct iavf_q_vector *q_vector;
903 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
904
905 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
906 q_vector = &adapter->q_vectors[q_idx];
907 napi_disable(&q_vector->napi);
908 }
909 }
910
911 /**
912 * iavf_configure - set up transmit and receive data structures
913 * @adapter: board private structure
914 **/
915 static void iavf_configure(struct iavf_adapter *adapter)
916 {
917 struct net_device *netdev = adapter->netdev;
918 int i;
919
920 iavf_set_rx_mode(netdev);
921
922 iavf_configure_tx(adapter);
923 iavf_configure_rx(adapter);
924 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
925
926 for (i = 0; i < adapter->num_active_queues; i++) {
927 struct iavf_ring *ring = &adapter->rx_rings[i];
928
929 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
930 }
931 }
932
933 /**
934 * iavf_up_complete - Finish the last steps of bringing up a connection
935 * @adapter: board private structure
936 *
937 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
938 **/
939 static void iavf_up_complete(struct iavf_adapter *adapter)
940 {
941 adapter->state = __IAVF_RUNNING;
942 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
943
944 iavf_napi_enable_all(adapter);
945
946 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
947 if (CLIENT_ENABLED(adapter))
948 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
949 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
950 }
951
952 /**
953 * iavf_down - Shutdown the connection processing
954 * @adapter: board private structure
955 *
956 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
957 **/
958 void iavf_down(struct iavf_adapter *adapter)
959 {
960 struct net_device *netdev = adapter->netdev;
961 struct iavf_vlan_filter *vlf;
962 struct iavf_mac_filter *f;
963 struct iavf_cloud_filter *cf;
964
965 if (adapter->state <= __IAVF_DOWN_PENDING)
966 return;
967
968 netif_carrier_off(netdev);
969 netif_tx_disable(netdev);
970 adapter->link_up = false;
971 iavf_napi_disable_all(adapter);
972 iavf_irq_disable(adapter);
973
974 spin_lock_bh(&adapter->mac_vlan_list_lock);
975
976 /* clear the sync flag on all filters */
977 __dev_uc_unsync(adapter->netdev, NULL);
978 __dev_mc_unsync(adapter->netdev, NULL);
979
980 /* remove all MAC filters */
981 list_for_each_entry(f, &adapter->mac_filter_list, list) {
982 f->remove = true;
983 }
984
985 /* remove all VLAN filters */
986 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
987 vlf->remove = true;
988 }
989
990 spin_unlock_bh(&adapter->mac_vlan_list_lock);
991
992 /* remove all cloud filters */
993 spin_lock_bh(&adapter->cloud_filter_list_lock);
994 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
995 cf->del = true;
996 }
997 spin_unlock_bh(&adapter->cloud_filter_list_lock);
998
999 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
1000 adapter->state != __IAVF_RESETTING) {
1001 /* cancel any current operation */
1002 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1003 /* Schedule operations to close down the HW. Don't wait
1004 * here for this to complete. The watchdog is still running
1005 * and it will take care of this.
1006 */
1007 adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
1008 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1009 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1010 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1011 }
1012
1013 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1014 }
1015
1016 /**
1017 * iavf_acquire_msix_vectors - Setup the MSIX capability
1018 * @adapter: board private structure
1019 * @vectors: number of vectors to request
1020 *
1021 * Work with the OS to set up the MSIX vectors needed.
1022 *
1023 * Returns 0 on success, negative on failure
1024 **/
1025 static int
1026 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1027 {
1028 int err, vector_threshold;
1029
1030 /* We'll want at least 3 (vector_threshold):
1031 * 0) Other (Admin Queue and link, mostly)
1032 * 1) TxQ[0] Cleanup
1033 * 2) RxQ[0] Cleanup
1034 */
1035 vector_threshold = MIN_MSIX_COUNT;
1036
1037 /* The more we get, the more we will assign to Tx/Rx Cleanup
1038 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1039 * Right now, we simply care about how many we'll get; we'll
1040 * set them up later while requesting irq's.
1041 */
1042 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1043 vector_threshold, vectors);
1044 if (err < 0) {
1045 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1046 kfree(adapter->msix_entries);
1047 adapter->msix_entries = NULL;
1048 return err;
1049 }
1050
1051 /* Adjust for only the vectors we'll use, which is minimum
1052 * of max_msix_q_vectors + NONQ_VECS, or the number of
1053 * vectors we were allocated.
1054 */
1055 adapter->num_msix_vectors = err;
1056 return 0;
1057 }
1058
1059 /**
1060 * iavf_free_queues - Free memory for all rings
1061 * @adapter: board private structure to initialize
1062 *
1063 * Free all of the memory associated with queue pairs.
1064 **/
1065 static void iavf_free_queues(struct iavf_adapter *adapter)
1066 {
1067 if (!adapter->vsi_res)
1068 return;
1069 adapter->num_active_queues = 0;
1070 kfree(adapter->tx_rings);
1071 adapter->tx_rings = NULL;
1072 kfree(adapter->rx_rings);
1073 adapter->rx_rings = NULL;
1074 }
1075
1076 /**
1077 * iavf_alloc_queues - Allocate memory for all rings
1078 * @adapter: board private structure to initialize
1079 *
1080 * We allocate one ring per queue at run-time since we don't know the
1081 * number of queues at compile-time. This works for both single-queue
1082 * and multiqueue configurations.
1083 **/
1084 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1085 {
1086 int i, num_active_queues;
1087
1088 /* If we're reallocating queues during a reset, we don't yet know for
1089 * certain that the PF gave us the number of queues we asked for, but
1090 * we'll assume it did. Once the basic reset is finished we'll confirm
1091 * this when we start negotiating the config with the PF.
1092 */
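/* Priority order below: an explicitly requested queue count first,
 * then the ADq channel total, then the PF-provided count capped at
 * the number of online CPUs.
 */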
1093 if (adapter->num_req_queues)
1094 num_active_queues = adapter->num_req_queues;
1095 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1096 adapter->num_tc)
1097 num_active_queues = adapter->ch_config.total_qps;
1098 else
1099 num_active_queues = min_t(int,
1100 adapter->vsi_res->num_queue_pairs,
1101 (int)(num_online_cpus()));
1102
1103
1104 adapter->tx_rings = kcalloc(num_active_queues,
1105 sizeof(struct iavf_ring), GFP_KERNEL);
1106 if (!adapter->tx_rings)
1107 goto err_out;
1108 adapter->rx_rings = kcalloc(num_active_queues,
1109 sizeof(struct iavf_ring), GFP_KERNEL);
1110 if (!adapter->rx_rings)
1111 goto err_out;
1112
1113 for (i = 0; i < num_active_queues; i++) {
1114 struct iavf_ring *tx_ring;
1115 struct iavf_ring *rx_ring;
1116
1117 tx_ring = &adapter->tx_rings[i];
1118
1119 tx_ring->queue_index = i;
1120 tx_ring->netdev = adapter->netdev;
1121 tx_ring->dev = &adapter->pdev->dev;
1122 tx_ring->count = adapter->tx_desc_count;
1123 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1124 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1125 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1126
1127 rx_ring = &adapter->rx_rings[i];
1128 rx_ring->queue_index = i;
1129 rx_ring->netdev = adapter->netdev;
1130 rx_ring->dev = &adapter->pdev->dev;
1131 rx_ring->count = adapter->rx_desc_count;
1132 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1133 }
1134
1135 adapter->num_active_queues = num_active_queues;
1136
1137 return 0;
1138
1139 err_out:
1140 iavf_free_queues(adapter);
1141 return -ENOMEM;
1142 }
1143
1144 /**
1145 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1146 * @adapter: board private structure to initialize
1147 *
1148 * Attempt to configure the interrupts using the best available
1149 * capabilities of the hardware and the kernel.
1150 **/
1151 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1152 {
1153 int vector, v_budget;
1154 int pairs = 0;
1155 int err = 0;
1156
1157 if (!adapter->vsi_res) {
1158 err = -EIO;
1159 goto out;
1160 }
1161 pairs = adapter->num_active_queues;
1162
1163 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1164 * us much good if we have more vectors than CPUs. However, we already
1165 * limit the total number of queues by the number of CPUs so we do not
1166 * need any further limiting here.
1167 */
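/* e.g. four active queue pairs plus the single non-queue (admin queue)
 * vector give a budget of five, further limited by the PF-advertised
 * max_vectors below.
 */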
1168 v_budget = min_t(int, pairs + NONQ_VECS,
1169 (int)adapter->vf_res->max_vectors);
1170
1171 adapter->msix_entries = kcalloc(v_budget,
1172 sizeof(struct msix_entry), GFP_KERNEL);
1173 if (!adapter->msix_entries) {
1174 err = -ENOMEM;
1175 goto out;
1176 }
1177
1178 for (vector = 0; vector < v_budget; vector++)
1179 adapter->msix_entries[vector].entry = vector;
1180
1181 err = iavf_acquire_msix_vectors(adapter, v_budget);
1182
1183 out:
1184 netif_set_real_num_rx_queues(adapter->netdev, pairs);
1185 netif_set_real_num_tx_queues(adapter->netdev, pairs);
1186 return err;
1187 }
1188
1189 /**
1190 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1191 * @adapter: board private structure
1192 *
1193 * Return 0 on success, negative on failure
1194 **/
1195 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1196 {
1197 struct iavf_aqc_get_set_rss_key_data *rss_key =
1198 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1199 struct iavf_hw *hw = &adapter->hw;
1200 int ret = 0;
1201
1202 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1203 /* bail because we already have a command pending */
1204 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1205 adapter->current_op);
1206 return -EBUSY;
1207 }
1208
1209 ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1210 if (ret) {
1211 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1212 iavf_stat_str(hw, ret),
1213 iavf_aq_str(hw, hw->aq.asq_last_status));
1214 return ret;
1215
1216 }
1217
1218 ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1219 adapter->rss_lut, adapter->rss_lut_size);
1220 if (ret) {
1221 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1222 iavf_stat_str(hw, ret),
1223 iavf_aq_str(hw, hw->aq.asq_last_status));
1224 }
1225
1226 return ret;
1227
1228 }
1229
1230 /**
1231 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1232 * @adapter: board private structure
1233 *
1234 * Returns 0 on success, negative on failure
1235 **/
1236 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1237 {
1238 struct iavf_hw *hw = &adapter->hw;
1239 u32 *dw;
1240 u16 i;
1241
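/* Neither the PF nor the admin queue owns RSS here, so the key and
 * LUT are written directly into the VF registers, one dword at a time.
 */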
1242 dw = (u32 *)adapter->rss_key;
1243 for (i = 0; i <= adapter->rss_key_size / 4; i++)
1244 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1245
1246 dw = (u32 *)adapter->rss_lut;
1247 for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1248 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1249
1250 iavf_flush(hw);
1251
1252 return 0;
1253 }
1254
1255 /**
1256 * iavf_config_rss - Configure RSS keys and lut
1257 * @adapter: board private structure
1258 *
1259 * Returns 0 on success, negative on failure
1260 **/
1261 int iavf_config_rss(struct iavf_adapter *adapter)
1262 {
1263
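/* Three possibilities: the PF programs RSS on our behalf, we configure
 * it through the admin queue, or we write the VF registers directly.
 */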
1264 if (RSS_PF(adapter)) {
1265 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1266 IAVF_FLAG_AQ_SET_RSS_KEY;
1267 return 0;
1268 } else if (RSS_AQ(adapter)) {
1269 return iavf_config_rss_aq(adapter);
1270 } else {
1271 return iavf_config_rss_reg(adapter);
1272 }
1273 }
1274
1275 /**
1276 * iavf_fill_rss_lut - Fill the lut with default values
1277 * @adapter: board private structure
1278 **/
1279 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1280 {
1281 u16 i;
1282
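/* e.g. with four active queues the LUT becomes 0,1,2,3,0,1,2,3,... */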
1283 for (i = 0; i < adapter->rss_lut_size; i++)
1284 adapter->rss_lut[i] = i % adapter->num_active_queues;
1285 }
1286
1287 /**
1288 * iavf_init_rss - Prepare for RSS
1289 * @adapter: board private structure
1290 *
1291 * Return 0 on success, negative on failure
1292 **/
1293 static int iavf_init_rss(struct iavf_adapter *adapter)
1294 {
1295 struct iavf_hw *hw = &adapter->hw;
1296 int ret;
1297
1298 if (!RSS_PF(adapter)) {
1299 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1300 if (adapter->vf_res->vf_cap_flags &
1301 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1302 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1303 else
1304 adapter->hena = IAVF_DEFAULT_RSS_HENA;
1305
1306 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1307 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1308 }
1309
1310 iavf_fill_rss_lut(adapter);
1311 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1312 ret = iavf_config_rss(adapter);
1313
1314 return ret;
1315 }
1316
1317 /**
1318 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1319 * @adapter: board private structure to initialize
1320 *
1321 * We allocate one q_vector per queue interrupt. If allocation fails we
1322 * return -ENOMEM.
1323 **/
1324 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1325 {
1326 int q_idx = 0, num_q_vectors;
1327 struct iavf_q_vector *q_vector;
1328
1329 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1330 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1331 GFP_KERNEL);
1332 if (!adapter->q_vectors)
1333 return -ENOMEM;
1334
1335 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1336 q_vector = &adapter->q_vectors[q_idx];
1337 q_vector->adapter = adapter;
1338 q_vector->vsi = &adapter->vsi;
1339 q_vector->v_idx = q_idx;
1340 q_vector->reg_idx = q_idx;
1341 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1342 netif_napi_add(adapter->netdev, &q_vector->napi,
1343 iavf_napi_poll, NAPI_POLL_WEIGHT);
1344 }
1345
1346 return 0;
1347 }
1348
1349 /**
1350 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1351 * @adapter: board private structure to initialize
1352 *
1353 * This function frees the memory allocated to the q_vectors. In addition if
1354 * NAPI is enabled it will delete any references to the NAPI struct prior
1355 * to freeing the q_vector.
1356 **/
1357 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1358 {
1359 int q_idx, num_q_vectors;
1360 int napi_vectors;
1361
1362 if (!adapter->q_vectors)
1363 return;
1364
1365 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1366 napi_vectors = adapter->num_active_queues;
1367
1368 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1369 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1370
1371 if (q_idx < napi_vectors)
1372 netif_napi_del(&q_vector->napi);
1373 }
1374 kfree(adapter->q_vectors);
1375 adapter->q_vectors = NULL;
1376 }
1377
1378 /**
1379 * iavf_reset_interrupt_capability - Reset MSIX setup
1380 * @adapter: board private structure
1381 *
1382 **/
1383 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1384 {
1385 if (!adapter->msix_entries)
1386 return;
1387
1388 pci_disable_msix(adapter->pdev);
1389 kfree(adapter->msix_entries);
1390 adapter->msix_entries = NULL;
1391 }
1392
1393 /**
1394 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1395 * @adapter: board private structure to initialize
1396 *
1397 **/
1398 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1399 {
1400 int err;
1401
1402 err = iavf_alloc_queues(adapter);
1403 if (err) {
1404 dev_err(&adapter->pdev->dev,
1405 "Unable to allocate memory for queues\n");
1406 goto err_alloc_queues;
1407 }
1408
1409 rtnl_lock();
1410 err = iavf_set_interrupt_capability(adapter);
1411 rtnl_unlock();
1412 if (err) {
1413 dev_err(&adapter->pdev->dev,
1414 "Unable to setup interrupt capabilities\n");
1415 goto err_set_interrupt;
1416 }
1417
1418 err = iavf_alloc_q_vectors(adapter);
1419 if (err) {
1420 dev_err(&adapter->pdev->dev,
1421 "Unable to allocate memory for queue vectors\n");
1422 goto err_alloc_q_vectors;
1423 }
1424
1425 /* If we've made it this far with the ADq flag set, we haven't bailed
1426 * out anywhere along the way, and the ADq resources have actually
1427 * been allocated in the reset path.
1428 * Now we can truly claim that ADq is enabled.
1429 */
1430 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1431 adapter->num_tc)
1432 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1433 adapter->num_tc);
1434
1435 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1436 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1437 adapter->num_active_queues);
1438
1439 return 0;
1440 err_alloc_q_vectors:
1441 iavf_reset_interrupt_capability(adapter);
1442 err_set_interrupt:
1443 iavf_free_queues(adapter);
1444 err_alloc_queues:
1445 return err;
1446 }
1447
1448 /**
1449 * iavf_free_rss - Free memory used by RSS structs
1450 * @adapter: board private structure
1451 **/
1452 static void iavf_free_rss(struct iavf_adapter *adapter)
1453 {
1454 kfree(adapter->rss_key);
1455 adapter->rss_key = NULL;
1456
1457 kfree(adapter->rss_lut);
1458 adapter->rss_lut = NULL;
1459 }
1460
1461 /**
1462 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1463 * @adapter: board private structure
1464 *
1465 * Returns 0 on success, negative on failure
1466 **/
1467 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1468 {
1469 struct net_device *netdev = adapter->netdev;
1470 int err;
1471
1472 if (netif_running(netdev))
1473 iavf_free_traffic_irqs(adapter);
1474 iavf_free_misc_irq(adapter);
1475 iavf_reset_interrupt_capability(adapter);
1476 iavf_free_q_vectors(adapter);
1477 iavf_free_queues(adapter);
1478
1479 err = iavf_init_interrupt_scheme(adapter);
1480 if (err)
1481 goto err;
1482
1483 netif_tx_stop_all_queues(netdev);
1484
1485 err = iavf_request_misc_irq(adapter);
1486 if (err)
1487 goto err;
1488
1489 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1490
1491 iavf_map_rings_to_vectors(adapter);
1492
1493 if (RSS_AQ(adapter))
1494 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
1495 else
1496 err = iavf_init_rss(adapter);
1497 err:
1498 return err;
1499 }
1500
1501 /**
1502 * iavf_process_aq_command - process aq_required flags
1503 * and sends aq command
1504 * @adapter: pointer to iavf adapter structure
1505 *
1506 * Returns 0 on success
1507 * Returns error code if no command was sent
1508 * or error code if the command failed.
1509 **/
1510 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1511 {
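/* At most one AQ command is issued per invocation; the watchdog task
 * keeps calling this function until every aq_required flag has been
 * serviced and -EAGAIN is returned.
 */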
1512 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1513 return iavf_send_vf_config_msg(adapter);
1514 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1515 iavf_disable_queues(adapter);
1516 return 0;
1517 }
1518
1519 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1520 iavf_map_queues(adapter);
1521 return 0;
1522 }
1523
1524 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1525 iavf_add_ether_addrs(adapter);
1526 return 0;
1527 }
1528
1529 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1530 iavf_add_vlans(adapter);
1531 return 0;
1532 }
1533
1534 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1535 iavf_del_ether_addrs(adapter);
1536 return 0;
1537 }
1538
1539 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1540 iavf_del_vlans(adapter);
1541 return 0;
1542 }
1543
1544 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1545 iavf_enable_vlan_stripping(adapter);
1546 return 0;
1547 }
1548
1549 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1550 iavf_disable_vlan_stripping(adapter);
1551 return 0;
1552 }
1553
1554 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1555 iavf_configure_queues(adapter);
1556 return 0;
1557 }
1558
1559 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1560 iavf_enable_queues(adapter);
1561 return 0;
1562 }
1563
1564 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1565 /* This message goes straight to the firmware, not the
1566 * PF, so we don't have to set current_op as we will
1567 * not get a response through the ARQ.
1568 */
1569 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1570 return 0;
1571 }
1572 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1573 iavf_get_hena(adapter);
1574 return 0;
1575 }
1576 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1577 iavf_set_hena(adapter);
1578 return 0;
1579 }
1580 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1581 iavf_set_rss_key(adapter);
1582 return 0;
1583 }
1584 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1585 iavf_set_rss_lut(adapter);
1586 return 0;
1587 }
1588
1589 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1590 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
1591 FLAG_VF_MULTICAST_PROMISC);
1592 return 0;
1593 }
1594
1595 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1596 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
1597 return 0;
1598 }
1599
1600 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
1601 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1602 iavf_set_promiscuous(adapter, 0);
1603 return 0;
1604 }
1605
1606 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1607 iavf_enable_channels(adapter);
1608 return 0;
1609 }
1610
1611 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1612 iavf_disable_channels(adapter);
1613 return 0;
1614 }
1615 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1616 iavf_add_cloud_filter(adapter);
1617 return 0;
1618 }
1619
1620 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1621 iavf_del_cloud_filter(adapter);
1622 return 0;
1623 }
1632 return -EAGAIN;
1633 }
1634
1635 /**
1636 * iavf_startup - first step of driver startup
1637 * @adapter: board private structure
1638 *
1639 * Processes the __IAVF_STARTUP driver state.
1640 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
1641 * on failure it returns -EAGAIN.
1642 **/
1643 static int iavf_startup(struct iavf_adapter *adapter)
1644 {
1645 struct pci_dev *pdev = adapter->pdev;
1646 struct iavf_hw *hw = &adapter->hw;
1647 int err;
1648
1649 WARN_ON(adapter->state != __IAVF_STARTUP);
1650
1651 /* driver loaded, probe complete */
1652 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1653 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1654 err = iavf_set_mac_type(hw);
1655 if (err) {
1656 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
1657 goto err;
1658 }
1659
1660 err = iavf_check_reset_complete(hw);
1661 if (err) {
1662 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1663 err);
1664 goto err;
1665 }
1666 hw->aq.num_arq_entries = IAVF_AQ_LEN;
1667 hw->aq.num_asq_entries = IAVF_AQ_LEN;
1668 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1669 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1670
1671 err = iavf_init_adminq(hw);
1672 if (err) {
1673 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
1674 goto err;
1675 }
1676 err = iavf_send_api_ver(adapter);
1677 if (err) {
1678 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
1679 iavf_shutdown_adminq(hw);
1680 goto err;
1681 }
1682 adapter->state = __IAVF_INIT_VERSION_CHECK;
1683 err:
1684 return err;
1685 }
1686
1687 /**
1688 * iavf_init_version_check - second step of driver startup
1689 * @adapter: board private structure
1690 *
1691 * Processes the __IAVF_INIT_VERSION_CHECK driver state.
1692 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
1693 * on failure it returns -EAGAIN.
1694 **/
1695 static int iavf_init_version_check(struct iavf_adapter *adapter)
1696 {
1697 struct pci_dev *pdev = adapter->pdev;
1698 struct iavf_hw *hw = &adapter->hw;
1699 int err = -EAGAIN;
1700
1701 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
1702
1703 if (!iavf_asq_done(hw)) {
1704 dev_err(&pdev->dev, "Admin queue command never completed\n");
1705 iavf_shutdown_adminq(hw);
1706 adapter->state = __IAVF_STARTUP;
1707 goto err;
1708 }
1709
1710 /* aq msg sent, awaiting reply */
1711 err = iavf_verify_api_ver(adapter);
1712 if (err) {
1713 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
1714 err = iavf_send_api_ver(adapter);
1715 else
1716 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
1717 adapter->pf_version.major,
1718 adapter->pf_version.minor,
1719 VIRTCHNL_VERSION_MAJOR,
1720 VIRTCHNL_VERSION_MINOR);
1721 goto err;
1722 }
1723 err = iavf_send_vf_config_msg(adapter);
1724 if (err) {
1725 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
1726 err);
1727 goto err;
1728 }
1729 adapter->state = __IAVF_INIT_GET_RESOURCES;
1730
1731 err:
1732 return err;
1733 }
1734
1735 /**
1736 * iavf_init_get_resources - third step of driver startup
1737 * @adapter: board private structure
1738 *
1739 * Processes the __IAVF_INIT_GET_RESOURCES driver state and
1740 * finishes the driver initialization procedure.
1741 * On success the state is changed to __IAVF_DOWN;
1742 * on failure it returns -EAGAIN.
1743 **/
1744 static int iavf_init_get_resources(struct iavf_adapter *adapter)
1745 {
1746 struct net_device *netdev = adapter->netdev;
1747 struct pci_dev *pdev = adapter->pdev;
1748 struct iavf_hw *hw = &adapter->hw;
1749 int err;
1750
1751 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
1752 /* aq msg sent, awaiting reply */
1753 if (!adapter->vf_res) {
1754 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
1755 GFP_KERNEL);
1756 if (!adapter->vf_res) {
1757 err = -ENOMEM;
1758 goto err;
1759 }
1760 }
1761 err = iavf_get_vf_config(adapter);
1762 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
1763 err = iavf_send_vf_config_msg(adapter);
1764 goto err;
1765 } else if (err == IAVF_ERR_PARAM) {
1766 /* We only get ERR_PARAM if the device is in a very bad
1767 * state or if we've been disabled for previous bad
1768 * behavior. Either way, we're done now.
1769 */
1770 iavf_shutdown_adminq(hw);
1771 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
1772 return 0;
1773 }
1774 if (err) {
1775 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
1776 goto err_alloc;
1777 }
1778
1779 if (iavf_process_config(adapter))
1780 goto err_alloc;
1781 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1782
1783 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
1784
1785 netdev->netdev_ops = &iavf_netdev_ops;
1786 iavf_set_ethtool_ops(netdev);
1787 netdev->watchdog_timeo = 5 * HZ;
1788
1789 /* MTU range: 68 - 9710 */
1790 netdev->min_mtu = ETH_MIN_MTU;
1791 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
1792
1793 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1794 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1795 adapter->hw.mac.addr);
1796 eth_hw_addr_random(netdev);
1797 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1798 } else {
1799 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
1800 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
1801 }
1802
1803 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
1804 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
1805 err = iavf_init_interrupt_scheme(adapter);
1806 if (err)
1807 goto err_sw_init;
1808 iavf_map_rings_to_vectors(adapter);
1809 if (adapter->vf_res->vf_cap_flags &
1810 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1811 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
1812
1813 err = iavf_request_misc_irq(adapter);
1814 if (err)
1815 goto err_sw_init;
1816
1817 netif_carrier_off(netdev);
1818 adapter->link_up = false;
1819
1820 /* hold the rtnl lock to prevent any callbacks after device registration
1821  * until the driver state has been set to __IAVF_DOWN
1822  */
1823 rtnl_lock();
1824 if (!adapter->netdev_registered) {
1825 err = register_netdevice(netdev);
1826 if (err) {
1827 rtnl_unlock();
1828 goto err_register;
1829 }
1830 }
1831
1832 adapter->netdev_registered = true;
1833
1834 netif_tx_stop_all_queues(netdev);
1835 if (CLIENT_ALLOWED(adapter)) {
1836 err = iavf_lan_add_device(adapter);
1837 if (err) {
1838 rtnl_unlock();
1839 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
1840 err);
1841 }
1842 }
1843 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
1844 if (netdev->features & NETIF_F_GRO)
1845 dev_info(&pdev->dev, "GRO is enabled\n");
1846
1847 adapter->state = __IAVF_DOWN;
1848 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1849 rtnl_unlock();
1850
1851 iavf_misc_irq_enable(adapter);
1852 wake_up(&adapter->down_waitqueue);
1853
1854 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
1855 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
1856 if (!adapter->rss_key || !adapter->rss_lut) {
1857 err = -ENOMEM;
1858 goto err_mem;
1859 }
1860 if (RSS_AQ(adapter))
1861 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
1862 else
1863 iavf_init_rss(adapter);
1864
1865 return err;
1866 err_mem:
1867 iavf_free_rss(adapter);
1868 err_register:
1869 iavf_free_misc_irq(adapter);
1870 err_sw_init:
1871 iavf_reset_interrupt_capability(adapter);
1872 err_alloc:
1873 kfree(adapter->vf_res);
1874 adapter->vf_res = NULL;
1875 err:
1876 return err;
1877 }
1878
1879 /**
1880 * iavf_watchdog_task - Periodic call-back task
1881 * @work: pointer to work_struct
1882 **/
1883 static void iavf_watchdog_task(struct work_struct *work)
1884 {
1885 struct iavf_adapter *adapter = container_of(work,
1886 struct iavf_adapter,
1887 watchdog_task.work);
1888 struct iavf_hw *hw = &adapter->hw;
1889 u32 reg_val;
1890
1891 if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
1892 goto restart_watchdog;
1893
1894 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
1895 adapter->state = __IAVF_COMM_FAILED;
1896
1897 switch (adapter->state) {
1898 case __IAVF_COMM_FAILED:
1899 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
1900 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1901 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
1902 reg_val == VIRTCHNL_VFR_COMPLETED) {
1903 /* A chance for redemption! */
1904 dev_err(&adapter->pdev->dev,
1905 "Hardware came out of reset. Attempting reinit.\n");
1906 adapter->state = __IAVF_STARTUP;
1907 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1908 queue_delayed_work(iavf_wq, &adapter->init_task, 10);
1909 clear_bit(__IAVF_IN_CRITICAL_TASK,
1910 &adapter->crit_section);
1911 /* Don't reschedule the watchdog, since we've restarted
1912 * the init task. When init_task contacts the PF and
1913 * gets everything set up again, it'll restart the
1914 * watchdog for us. Down, boy. Sit. Stay. Woof.
1915 */
1916 return;
1917 }
1918 adapter->aq_required = 0;
1919 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1920 clear_bit(__IAVF_IN_CRITICAL_TASK,
1921 &adapter->crit_section);
1922 queue_delayed_work(iavf_wq,
1923 &adapter->watchdog_task,
1924 msecs_to_jiffies(10));
1925 goto watchdog_done;
1926 case __IAVF_RESETTING:
1927 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1928 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
1929 return;
1930 case __IAVF_DOWN:
1931 case __IAVF_DOWN_PENDING:
1932 case __IAVF_TESTING:
1933 case __IAVF_RUNNING:
1934 if (adapter->current_op) {
1935 if (!iavf_asq_done(hw)) {
1936 dev_dbg(&adapter->pdev->dev,
1937 "Admin queue timeout\n");
1938 iavf_send_api_ver(adapter);
1939 }
1940 } else {
1941 /* An error will be returned if no commands were
1942 * processed; use this opportunity to update stats
1943 */
1944 if (iavf_process_aq_command(adapter) &&
1945 adapter->state == __IAVF_RUNNING)
1946 iavf_request_stats(adapter);
1947 }
1948 break;
1949 case __IAVF_REMOVE:
1950 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1951 return;
1952 default:
1953 goto restart_watchdog;
1954 }
1955
1956 /* check for hw reset */
1957 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
1958 if (!reg_val) {
1959 adapter->state = __IAVF_RESETTING;
1960 adapter->flags |= IAVF_FLAG_RESET_PENDING;
1961 adapter->aq_required = 0;
1962 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1963 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1964 queue_work(iavf_wq, &adapter->reset_task);
1965 goto watchdog_done;
1966 }
1967
1968 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
1969 watchdog_done:
1970 if (adapter->state == __IAVF_RUNNING ||
1971 adapter->state == __IAVF_COMM_FAILED)
1972 iavf_detect_recover_hung(&adapter->vsi);
1973 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1974 restart_watchdog:
1975 if (adapter->aq_required)
1976 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
1977 msecs_to_jiffies(20));
1978 else
1979 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
1980 queue_work(iavf_wq, &adapter->adminq_task);
1981 }
1982
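/**
 * iavf_disable_vf - disable the VF after a failed reset
 * @adapter: board private structure
 *
 * Called when the reset task gives up on the hardware: tears down the
 * traffic resources, drops all MAC, VLAN and cloud filters, and leaves
 * the VF in the __IAVF_DOWN state with PF communications marked failed.
 **/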
1983 static void iavf_disable_vf(struct iavf_adapter *adapter)
1984 {
1985 struct iavf_mac_filter *f, *ftmp;
1986 struct iavf_vlan_filter *fv, *fvtmp;
1987 struct iavf_cloud_filter *cf, *cftmp;
1988
1989 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
1990
1991 /* We don't use netif_running() because it may be true prior to
1992 * ndo_open() returning, so we can't assume it means all our open
1993 * tasks have finished, since we're not holding the rtnl_lock here.
1994 */
1995 if (adapter->state == __IAVF_RUNNING) {
1996 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1997 netif_carrier_off(adapter->netdev);
1998 netif_tx_disable(adapter->netdev);
1999 adapter->link_up = false;
2000 iavf_napi_disable_all(adapter);
2001 iavf_irq_disable(adapter);
2002 iavf_free_traffic_irqs(adapter);
2003 iavf_free_all_tx_resources(adapter);
2004 iavf_free_all_rx_resources(adapter);
2005 }
2006
2007 spin_lock_bh(&adapter->mac_vlan_list_lock);
2008
2009 /* Delete all of the filters */
2010 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2011 list_del(&f->list);
2012 kfree(f);
2013 }
2014
2015 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2016 list_del(&fv->list);
2017 kfree(fv);
2018 }
2019
2020 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2021
2022 spin_lock_bh(&adapter->cloud_filter_list_lock);
2023 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2024 list_del(&cf->list);
2025 kfree(cf);
2026 adapter->num_cloud_filters--;
2027 }
2028 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2029
2030 iavf_free_misc_irq(adapter);
2031 iavf_reset_interrupt_capability(adapter);
2032 iavf_free_queues(adapter);
2033 iavf_free_q_vectors(adapter);
2034 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2035 iavf_shutdown_adminq(&adapter->hw);
2036 adapter->netdev->flags &= ~IFF_UP;
2037 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2038 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2039 adapter->state = __IAVF_DOWN;
2040 wake_up(&adapter->down_waitqueue);
2041 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2042 }
2043
2044 /**
2045 * iavf_reset_task - Call-back task to handle hardware reset
2046 * @work: pointer to work_struct
2047 *
2048 * During reset we need to shut down and reinitialize the admin queue
2049 * before we can use it to communicate with the PF again. We also clear
2050 * and reinit the rings because that context is lost as well.
2051 **/
2052 static void iavf_reset_task(struct work_struct *work)
2053 {
2054 struct iavf_adapter *adapter = container_of(work,
2055 struct iavf_adapter,
2056 reset_task);
2057 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2058 struct net_device *netdev = adapter->netdev;
2059 struct iavf_hw *hw = &adapter->hw;
2060 struct iavf_mac_filter *f, *ftmp;
2061 struct iavf_vlan_filter *vlf;
2062 struct iavf_cloud_filter *cf;
2063 u32 reg_val;
2064 int i = 0, err;
2065 bool running;
2066
2067 /* When device is being removed it doesn't make sense to run the reset
2068 * task, just return in such a case.
2069 */
2070 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2071 return;
2072
2073 while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
2074 &adapter->crit_section))
2075 usleep_range(500, 1000);
2076 if (CLIENT_ENABLED(adapter)) {
2077 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2078 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2079 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2080 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2081 cancel_delayed_work_sync(&adapter->client_task);
2082 iavf_notify_client_close(&adapter->vsi, true);
2083 }
2084 iavf_misc_irq_disable(adapter);
2085 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2086 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2087 /* Restart the AQ here. If we have been reset but didn't
2088 * detect it, or if the PF had to reinit, our AQ will be hosed.
2089 */
2090 iavf_shutdown_adminq(hw);
2091 iavf_init_adminq(hw);
2092 iavf_request_reset(adapter);
2093 }
2094 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2095
2096 /* poll until we see the reset actually happen */
2097 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2098 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2099 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2100 if (!reg_val)
2101 break;
2102 usleep_range(5000, 10000);
2103 }
2104 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2105 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2106 goto continue_reset; /* act like the reset happened */
2107 }
2108
2109 /* wait until the reset is complete and the PF is responding to us */
2110 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2111 /* sleep first to make sure a minimum wait time is met */
2112 msleep(IAVF_RESET_WAIT_MS);
2113
2114 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2115 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2116 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2117 break;
2118 }
2119
2120 pci_set_master(adapter->pdev);
2121
2122 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2123 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2124 reg_val);
2125 iavf_disable_vf(adapter);
2126 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2127 return; /* Do not attempt to reinit. It's dead, Jim. */
2128 }
2129
2130 continue_reset:
2131 /* We don't use netif_running() because it may be true prior to
2132 * ndo_open() returning, so we can't assume it means all our open
2133 * tasks have finished, since we're not holding the rtnl_lock here.
2134 */
2135 running = ((adapter->state == __IAVF_RUNNING) ||
2136 (adapter->state == __IAVF_RESETTING));
2137
2138 if (running) {
2139 netif_carrier_off(netdev);
2140 netif_tx_stop_all_queues(netdev);
2141 adapter->link_up = false;
2142 iavf_napi_disable_all(adapter);
2143 }
2144 iavf_irq_disable(adapter);
2145
2146 adapter->state = __IAVF_RESETTING;
2147 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2148
2149 /* free the Tx/Rx rings and descriptors, might be better to just
2150 * re-use them sometime in the future
2151 */
2152 iavf_free_all_rx_resources(adapter);
2153 iavf_free_all_tx_resources(adapter);
2154
2155 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2156 /* kill and reinit the admin queue */
2157 iavf_shutdown_adminq(hw);
2158 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2159 err = iavf_init_adminq(hw);
2160 if (err)
2161 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2162 err);
2163 adapter->aq_required = 0;
2164
2165 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2166 err = iavf_reinit_interrupt_scheme(adapter);
2167 if (err)
2168 goto reset_err;
2169 }
2170
2171 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2172 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2173
2174 spin_lock_bh(&adapter->mac_vlan_list_lock);
2175
2176 /* Delete filter for the current MAC address, it could have
2177 * been changed by the PF via administratively set MAC.
2178 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2179 */
2180 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2181 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2182 list_del(&f->list);
2183 kfree(f);
2184 }
2185 }
2186 /* re-add all MAC filters */
2187 list_for_each_entry(f, &adapter->mac_filter_list, list) {
2188 f->add = true;
2189 }
2190 /* re-add all VLAN filters */
2191 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
2192 vlf->add = true;
2193 }
2194
2195 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2196
2197 /* check if TCs are running and re-add all cloud filters */
2198 spin_lock_bh(&adapter->cloud_filter_list_lock);
2199 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2200 adapter->num_tc) {
2201 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2202 cf->add = true;
2203 }
2204 }
2205 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2206
2207 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2208 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2209 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2210 iavf_misc_irq_enable(adapter);
2211
2212 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
2213
2214 /* We were running when the reset started, so we need to restore some
2215 * state here.
2216 */
2217 if (running) {
2218 /* allocate transmit descriptors */
2219 err = iavf_setup_all_tx_resources(adapter);
2220 if (err)
2221 goto reset_err;
2222
2223 /* allocate receive descriptors */
2224 err = iavf_setup_all_rx_resources(adapter);
2225 if (err)
2226 goto reset_err;
2227
2228 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2229 err = iavf_request_traffic_irqs(adapter, netdev->name);
2230 if (err)
2231 goto reset_err;
2232
2233 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2234 }
2235
2236 iavf_configure(adapter);
2237
2238 iavf_up_complete(adapter);
2239
2240 iavf_irq_enable(adapter, true);
2241 } else {
2242 adapter->state = __IAVF_DOWN;
2243 wake_up(&adapter->down_waitqueue);
2244 }
2245 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2246 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2247
2248 return;
2249 reset_err:
2250 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2251 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2252 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2253 iavf_close(netdev);
2254 }
2255
2256 /**
2257 * iavf_adminq_task - worker thread to clean the admin queue
2258 * @work: pointer to work_struct containing our data
2259 **/
2260 static void iavf_adminq_task(struct work_struct *work)
2261 {
2262 struct iavf_adapter *adapter =
2263 container_of(work, struct iavf_adapter, adminq_task);
2264 struct iavf_hw *hw = &adapter->hw;
2265 struct iavf_arq_event_info event;
2266 enum virtchnl_ops v_op;
2267 enum iavf_status ret, v_ret;
2268 u32 val, oldval;
2269 u16 pending;
2270
2271 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2272 goto out;
2273
2274 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
2275 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2276 if (!event.msg_buf)
2277 goto out;
2278
2279 do {
2280 ret = iavf_clean_arq_element(hw, &event, &pending);
2281 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2282 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2283
2284 if (ret || !v_op)
2285 break; /* No event to process or error cleaning ARQ */
2286
2287 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2288 event.msg_len);
2289 if (pending != 0)
2290 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2291 } while (pending);
2292
2293 if ((adapter->flags &
2294 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2295 adapter->state == __IAVF_RESETTING)
2296 goto freedom;
2297
2298 /* check for error indications */
2299 val = rd32(hw, hw->aq.arq.len);
2300 if (val == 0xdeadbeef) /* indicates device in reset */
2301 goto freedom;
2302 oldval = val;
2303 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2304 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2305 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2306 }
2307 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2308 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2309 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2310 }
2311 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2312 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2313 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2314 }
2315 if (oldval != val)
2316 wr32(hw, hw->aq.arq.len, val);
2317
2318 val = rd32(hw, hw->aq.asq.len);
2319 oldval = val;
2320 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2321 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2322 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2323 }
2324 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2325 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2326 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2327 }
2328 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2329 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2330 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2331 }
2332 if (oldval != val)
2333 wr32(hw, hw->aq.asq.len, val);
2334
2335 freedom:
2336 kfree(event.msg_buf);
2337 out:
2338 /* re-enable Admin queue interrupt cause */
2339 iavf_misc_irq_enable(adapter);
2340 }
2341
2342 /**
2343 * iavf_client_task - worker thread to perform client work
2344 * @work: pointer to work_struct containing our data
2345 *
2346 * This task handles client interactions. Because client calls can be
2347 * reentrant, we can't handle them in the watchdog.
2348 **/
2349 static void iavf_client_task(struct work_struct *work)
2350 {
2351 struct iavf_adapter *adapter =
2352 container_of(work, struct iavf_adapter, client_task.work);
2353
2354 /* If we can't get the client bit, just give up. We'll be rescheduled
2355 * later.
2356 */
2357
2358 if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
2359 return;
2360
2361 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2362 iavf_client_subtask(adapter);
2363 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2364 goto out;
2365 }
2366 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2367 iavf_notify_client_l2_params(&adapter->vsi);
2368 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2369 goto out;
2370 }
2371 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2372 iavf_notify_client_close(&adapter->vsi, false);
2373 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2374 goto out;
2375 }
2376 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2377 iavf_notify_client_open(&adapter->vsi);
2378 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2379 }
2380 out:
2381 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2382 }
2383
2384 /**
2385 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2386 * @adapter: board private structure
2387 *
2388 * Free all transmit software resources
2389 **/
2390 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2391 {
2392 int i;
2393
2394 if (!adapter->tx_rings)
2395 return;
2396
2397 for (i = 0; i < adapter->num_active_queues; i++)
2398 if (adapter->tx_rings[i].desc)
2399 iavf_free_tx_resources(&adapter->tx_rings[i]);
2400 }
2401
2402 /**
2403 * iavf_setup_all_tx_resources - allocate all queues Tx resources
2404 * @adapter: board private structure
2405 *
2406 * If this function returns with an error, then it's possible one or
2407 * more of the rings is populated (while the rest are not). It is the
2408 * caller's duty to clean those orphaned rings.
2409 *
2410 * Return 0 on success, negative on failure
2411 **/
2412 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2413 {
2414 int i, err = 0;
2415
2416 for (i = 0; i < adapter->num_active_queues; i++) {
2417 adapter->tx_rings[i].count = adapter->tx_desc_count;
2418 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2419 if (!err)
2420 continue;
2421 dev_err(&adapter->pdev->dev,
2422 "Allocation for Tx Queue %u failed\n", i);
2423 break;
2424 }
2425
2426 return err;
2427 }
2428
2429 /**
2430 * iavf_setup_all_rx_resources - allocate all queues Rx resources
2431 * @adapter: board private structure
2432 *
2433 * If this function returns with an error, then it's possible one or
2434 * more of the rings is populated (while the rest are not). It is the
2435 * caller's duty to clean those orphaned rings.
2436 *
2437 * Return 0 on success, negative on failure
2438 **/
2439 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2440 {
2441 int i, err = 0;
2442
2443 for (i = 0; i < adapter->num_active_queues; i++) {
2444 adapter->rx_rings[i].count = adapter->rx_desc_count;
2445 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2446 if (!err)
2447 continue;
2448 dev_err(&adapter->pdev->dev,
2449 "Allocation for Rx Queue %u failed\n", i);
2450 break;
2451 }
2452 return err;
2453 }
2454
2455 /**
2456 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2457 * @adapter: board private structure
2458 *
2459 * Free all receive software resources
2460 **/
2461 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2462 {
2463 int i;
2464
2465 if (!adapter->rx_rings)
2466 return;
2467
2468 for (i = 0; i < adapter->num_active_queues; i++)
2469 if (adapter->rx_rings[i].desc)
2470 iavf_free_rx_resources(&adapter->rx_rings[i]);
2471 }
2472
2473 /**
2474 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2475 * @adapter: board private structure
2476 * @max_tx_rate: maximum Tx bandwidth for a traffic class
2477 **/
2478 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2479 u64 max_tx_rate)
2480 {
2481 int speed = 0, ret = 0;
2482
2483 if (ADV_LINK_SUPPORT(adapter)) {
2484 if (adapter->link_speed_mbps < U32_MAX) {
2485 speed = adapter->link_speed_mbps;
2486 goto validate_bw;
2487 } else {
2488 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
2489 return -EINVAL;
2490 }
2491 }
2492
2493 switch (adapter->link_speed) {
2494 case VIRTCHNL_LINK_SPEED_40GB:
2495 speed = SPEED_40000;
2496 break;
2497 case VIRTCHNL_LINK_SPEED_25GB:
2498 speed = SPEED_25000;
2499 break;
2500 case VIRTCHNL_LINK_SPEED_20GB:
2501 speed = SPEED_20000;
2502 break;
2503 case VIRTCHNL_LINK_SPEED_10GB:
2504 speed = SPEED_10000;
2505 break;
2506 case VIRTCHNL_LINK_SPEED_5GB:
2507 speed = SPEED_5000;
2508 break;
2509 case VIRTCHNL_LINK_SPEED_2_5GB:
2510 speed = SPEED_2500;
2511 break;
2512 case VIRTCHNL_LINK_SPEED_1GB:
2513 speed = SPEED_1000;
2514 break;
2515 case VIRTCHNL_LINK_SPEED_100MB:
2516 speed = SPEED_100;
2517 break;
2518 default:
2519 break;
2520 }
2521
2522 validate_bw:
2523 if (max_tx_rate > speed) {
2524 dev_err(&adapter->pdev->dev,
2525 "Invalid tx rate specified\n");
2526 ret = -EINVAL;
2527 }
2528
2529 return ret;
2530 }
2531
2532 /**
2533 * iavf_validate_channel_config - validate queue mapping info
2534 * @adapter: board private structure
2535 * @mqprio_qopt: queue parameters
2536 *
2537 * This function validates the queue channel configuration provided by
2538 * the user. Returns 0 on a valid config.
2540 **/
2541 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2542 struct tc_mqprio_qopt_offload *mqprio_qopt)
2543 {
2544 u64 total_max_rate = 0;
2545 int i, num_qps = 0;
2546 u64 tx_rate = 0;
2547 int ret = 0;
2548
2549 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2550 mqprio_qopt->qopt.num_tc < 1)
2551 return -EINVAL;
2552
2553 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
2554 if (!mqprio_qopt->qopt.count[i] ||
2555 mqprio_qopt->qopt.offset[i] != num_qps)
2556 return -EINVAL;
2557 if (mqprio_qopt->min_rate[i]) {
2558 dev_err(&adapter->pdev->dev,
2559 "Invalid min tx rate (greater than 0) specified\n");
2560 return -EINVAL;
2561 }
2562 /* convert to Mbps */
2563 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2564 IAVF_MBPS_DIVISOR);
2565 total_max_rate += tx_rate;
2566 num_qps += mqprio_qopt->qopt.count[i];
2567 }
2568 if (num_qps > IAVF_MAX_REQ_QUEUES)
2569 return -EINVAL;
2570
2571 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
2572 return ret;
2573 }
2574
2575 /**
2576 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2577 * @adapter: board private structure
2578 **/
2579 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2580 {
2581 struct iavf_cloud_filter *cf, *cftmp;
2582
2583 spin_lock_bh(&adapter->cloud_filter_list_lock);
2584 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2585 list) {
2586 list_del(&cf->list);
2587 kfree(cf);
2588 adapter->num_cloud_filters--;
2589 }
2590 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2591 }
2592
2593 /**
2594 * __iavf_setup_tc - configure multiple traffic classes
2595 * @netdev: network interface device structure
2596 * @type_data: tc offload data
2597 *
2598 * This function processes the config information provided by the
2599 * user to configure traffic classes/queue channels and packages the
2600 * information to request the PF to setup traffic classes.
2601 *
2602 * Returns 0 on success.
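 *
 * For illustration only: such a channel configuration is typically
 * requested from userspace with the tc mqprio qdisc, e.g. (hypothetical
 * interface name)
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel
 *
 * which reaches this function as TC_SETUP_QDISC_MQPRIO with
 * mqprio_qopt->mode == TC_MQPRIO_MODE_CHANNEL.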
2603 **/
2604 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2605 {
2606 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2607 struct iavf_adapter *adapter = netdev_priv(netdev);
2608 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2609 u8 num_tc = 0, total_qps = 0;
2610 int ret = 0, netdev_tc = 0;
2611 u64 max_tx_rate;
2612 u16 mode;
2613 int i;
2614
2615 num_tc = mqprio_qopt->qopt.num_tc;
2616 mode = mqprio_qopt->mode;
2617
2618 /* delete queue_channel */
2619 if (!mqprio_qopt->qopt.hw) {
2620 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2621 /* reset the tc configuration */
2622 netdev_reset_tc(netdev);
2623 adapter->num_tc = 0;
2624 netif_tx_stop_all_queues(netdev);
2625 netif_tx_disable(netdev);
2626 iavf_del_all_cloud_filters(adapter);
2627 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2628 goto exit;
2629 } else {
2630 return -EINVAL;
2631 }
2632 }
2633
2634 /* add queue channel */
2635 if (mode == TC_MQPRIO_MODE_CHANNEL) {
2636 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2637 dev_err(&adapter->pdev->dev, "ADq not supported\n");
2638 return -EOPNOTSUPP;
2639 }
2640 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2641 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2642 return -EINVAL;
2643 }
2644
2645 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2646 if (ret)
2647 return ret;
2648 /* Return if same TC config is requested */
2649 if (adapter->num_tc == num_tc)
2650 return 0;
2651 adapter->num_tc = num_tc;
2652
2653 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2654 if (i < num_tc) {
2655 adapter->ch_config.ch_info[i].count =
2656 mqprio_qopt->qopt.count[i];
2657 adapter->ch_config.ch_info[i].offset =
2658 mqprio_qopt->qopt.offset[i];
2659 total_qps += mqprio_qopt->qopt.count[i];
2660 max_tx_rate = mqprio_qopt->max_rate[i];
2661 /* convert to Mbps */
2662 max_tx_rate = div_u64(max_tx_rate,
2663 IAVF_MBPS_DIVISOR);
2664 adapter->ch_config.ch_info[i].max_tx_rate =
2665 max_tx_rate;
2666 } else {
2667 adapter->ch_config.ch_info[i].count = 1;
2668 adapter->ch_config.ch_info[i].offset = 0;
2669 }
2670 }
2671 adapter->ch_config.total_qps = total_qps;
2672 netif_tx_stop_all_queues(netdev);
2673 netif_tx_disable(netdev);
2674 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
2675 netdev_reset_tc(netdev);
2676 /* Report the tc mapping up the stack */
2677 netdev_set_num_tc(adapter->netdev, num_tc);
2678 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2679 u16 qcount = mqprio_qopt->qopt.count[i];
2680 u16 qoffset = mqprio_qopt->qopt.offset[i];
2681
2682 if (i < num_tc)
2683 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2684 qoffset);
2685 }
2686 }
2687 exit:
2688 return ret;
2689 }
2690
2691 /**
2692 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
2693 * @adapter: board private structure
2694 * @f: pointer to struct flow_cls_offload
2695 * @filter: pointer to cloud filter structure
2696 */
2697 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2698 struct flow_cls_offload *f,
2699 struct iavf_cloud_filter *filter)
2700 {
2701 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2702 struct flow_dissector *dissector = rule->match.dissector;
2703 u16 n_proto_mask = 0;
2704 u16 n_proto_key = 0;
2705 u8 field_flags = 0;
2706 u16 addr_type = 0;
2707 u16 n_proto = 0;
2708 int i = 0;
2709 struct virtchnl_filter *vf = &filter->f;
2710
2711 if (dissector->used_keys &
2712 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2713 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2714 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2715 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2716 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2717 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2718 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2719 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2720 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2721 dissector->used_keys);
2722 return -EOPNOTSUPP;
2723 }
2724
2725 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2726 struct flow_match_enc_keyid match;
2727
2728 flow_rule_match_enc_keyid(rule, &match);
2729 if (match.mask->keyid != 0)
2730 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2731 }
2732
2733 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2734 struct flow_match_basic match;
2735
2736 flow_rule_match_basic(rule, &match);
2737 n_proto_key = ntohs(match.key->n_proto);
2738 n_proto_mask = ntohs(match.mask->n_proto);
2739
2740 if (n_proto_key == ETH_P_ALL) {
2741 n_proto_key = 0;
2742 n_proto_mask = 0;
2743 }
2744 n_proto = n_proto_key & n_proto_mask;
2745 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2746 return -EINVAL;
2747 if (n_proto == ETH_P_IPV6) {
2748 /* specify flow type as TCP IPv6 */
2749 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2750 }
2751
2752 if (match.key->ip_proto != IPPROTO_TCP) {
2753 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2754 return -EINVAL;
2755 }
2756 }
2757
2758 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2759 struct flow_match_eth_addrs match;
2760
2761 flow_rule_match_eth_addrs(rule, &match);
2762
2763 /* use is_broadcast and is_zero to check for all 0xf or 0 */
2764 if (!is_zero_ether_addr(match.mask->dst)) {
2765 if (is_broadcast_ether_addr(match.mask->dst)) {
2766 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2767 } else {
2768 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2769 match.mask->dst);
2770 return IAVF_ERR_CONFIG;
2771 }
2772 }
2773
2774 if (!is_zero_ether_addr(match.mask->src)) {
2775 if (is_broadcast_ether_addr(match.mask->src)) {
2776 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2777 } else {
2778 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2779 match.mask->src);
2780 return IAVF_ERR_CONFIG;
2781 }
2782 }
2783
2784 if (!is_zero_ether_addr(match.key->dst))
2785 if (is_valid_ether_addr(match.key->dst) ||
2786 is_multicast_ether_addr(match.key->dst)) {
2787 /* set the mask if a valid dst_mac address */
2788 for (i = 0; i < ETH_ALEN; i++)
2789 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2790 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2791 match.key->dst);
2792 }
2793
2794 if (!is_zero_ether_addr(match.key->src))
2795 if (is_valid_ether_addr(match.key->src) ||
2796 is_multicast_ether_addr(match.key->src)) {
2797 /* set the mask if a valid src_mac address */
2798 for (i = 0; i < ETH_ALEN; i++)
2799 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2800 ether_addr_copy(vf->data.tcp_spec.src_mac,
2801 match.key->src);
2802 }
2803 }
2804
2805 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2806 struct flow_match_vlan match;
2807
2808 flow_rule_match_vlan(rule, &match);
2809 if (match.mask->vlan_id) {
2810 if (match.mask->vlan_id == VLAN_VID_MASK) {
2811 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2812 } else {
2813 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2814 match.mask->vlan_id);
2815 return IAVF_ERR_CONFIG;
2816 }
2817 }
2818 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2819 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
2820 }
2821
2822 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2823 struct flow_match_control match;
2824
2825 flow_rule_match_control(rule, &match);
2826 addr_type = match.key->addr_type;
2827 }
2828
2829 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2830 struct flow_match_ipv4_addrs match;
2831
2832 flow_rule_match_ipv4_addrs(rule, &match);
2833 if (match.mask->dst) {
2834 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2835 field_flags |= IAVF_CLOUD_FIELD_IIP;
2836 } else {
2837 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2838 be32_to_cpu(match.mask->dst));
2839 return IAVF_ERR_CONFIG;
2840 }
2841 }
2842
2843 if (match.mask->src) {
2844 if (match.mask->src == cpu_to_be32(0xffffffff)) {
2845 field_flags |= IAVF_CLOUD_FIELD_IIP;
2846 } else {
2847 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2848 be32_to_cpu(match.mask->src));
2849 return IAVF_ERR_CONFIG;
2850 }
2851 }
2852
2853 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2854 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2855 return IAVF_ERR_CONFIG;
2856 }
2857 if (match.key->dst) {
2858 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2859 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2860 }
2861 if (match.key->src) {
2862 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2863 vf->data.tcp_spec.src_ip[0] = match.key->src;
2864 }
2865 }
2866
2867 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2868 struct flow_match_ipv6_addrs match;
2869
2870 flow_rule_match_ipv6_addrs(rule, &match);
2871
2872 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2873 if (ipv6_addr_any(&match.mask->dst)) {
2874 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2875 IPV6_ADDR_ANY);
2876 return IAVF_ERR_CONFIG;
2877 }
2878
2879 /* src and dest IPv6 addresses should not be the loopback address
2880 * (0:0:0:0:0:0:0:1), which can be represented as ::1
2881 */
2882 if (ipv6_addr_loopback(&match.key->dst) ||
2883 ipv6_addr_loopback(&match.key->src)) {
2884 dev_err(&adapter->pdev->dev,
2885 "ipv6 addr should not be loopback\n");
2886 return IAVF_ERR_CONFIG;
2887 }
2888 if (!ipv6_addr_any(&match.mask->dst) ||
2889 !ipv6_addr_any(&match.mask->src))
2890 field_flags |= IAVF_CLOUD_FIELD_IIP;
2891
2892 for (i = 0; i < 4; i++)
2893 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2894 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2895 sizeof(vf->data.tcp_spec.dst_ip));
2896 for (i = 0; i < 4; i++)
2897 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2898 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
2899 sizeof(vf->data.tcp_spec.src_ip));
2900 }
2901 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2902 struct flow_match_ports match;
2903
2904 flow_rule_match_ports(rule, &match);
2905 if (match.mask->src) {
2906 if (match.mask->src == cpu_to_be16(0xffff)) {
2907 field_flags |= IAVF_CLOUD_FIELD_IIP;
2908 } else {
2909 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2910 be16_to_cpu(match.mask->src));
2911 return IAVF_ERR_CONFIG;
2912 }
2913 }
2914
2915 if (match.mask->dst) {
2916 if (match.mask->dst == cpu_to_be16(0xffff)) {
2917 field_flags |= IAVF_CLOUD_FIELD_IIP;
2918 } else {
2919 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2920 be16_to_cpu(match.mask->dst));
2921 return IAVF_ERR_CONFIG;
2922 }
2923 }
2924 if (match.key->dst) {
2925 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2926 vf->data.tcp_spec.dst_port = match.key->dst;
2927 }
2928
2929 if (match.key->src) {
2930 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2931 vf->data.tcp_spec.src_port = match.key->src;
2932 }
2933 }
2934 vf->field_flags = field_flags;
2935
2936 return 0;
2937 }
2938
2939 /**
2940 * iavf_handle_tclass - Forward to a traffic class on the device
2941 * @adapter: board private structure
2942 * @tc: traffic class index on the device
2943 * @filter: pointer to cloud filter structure
2944 */
2945 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
2946 struct iavf_cloud_filter *filter)
2947 {
2948 if (tc == 0)
2949 return 0;
2950 if (tc < adapter->num_tc) {
2951 if (!filter->f.data.tcp_spec.dst_port) {
2952 dev_err(&adapter->pdev->dev,
2953 "Specify destination port to redirect to traffic class other than TC0\n");
2954 return -EINVAL;
2955 }
2956 }
2957 /* redirect to a traffic class on the same device */
2958 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2959 filter->f.action_meta = tc;
2960 return 0;
2961 }
2962
2963 /**
2964 * iavf_configure_clsflower - Add tc flower filters
2965 * @adapter: board private structure
2966 * @cls_flower: Pointer to struct flow_cls_offload
2967 */
2968 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
2969 struct flow_cls_offload *cls_flower)
2970 {
2971 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2972 struct iavf_cloud_filter *filter = NULL;
2973 int err = -EINVAL, count = 50;
2974
2975 if (tc < 0) {
2976 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
2977 return -EINVAL;
2978 }
2979
2980 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2981 if (!filter)
2982 return -ENOMEM;
2983
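	/* try to take the critical section; bail out after ~50 us of contention */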
2984 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
2985 &adapter->crit_section)) {
2986 if (--count == 0)
2987 goto err;
2988 udelay(1);
2989 }
2990
2991 filter->cookie = cls_flower->cookie;
2992
2993 /* set the mask to all zeroes to begin with */
2994 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
2995 /* start out with flow type and eth type IPv4 to begin with */
2996 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
2997 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
2998 if (err < 0)
2999 goto err;
3000
3001 err = iavf_handle_tclass(adapter, tc, filter);
3002 if (err < 0)
3003 goto err;
3004
3005 /* add filter to the list */
3006 spin_lock_bh(&adapter->cloud_filter_list_lock);
3007 list_add_tail(&filter->list, &adapter->cloud_filter_list);
3008 adapter->num_cloud_filters++;
3009 filter->add = true;
3010 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3011 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3012 err:
3013 if (err)
3014 kfree(filter);
3015
3016 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3017 return err;
3018 }
3019
3020 /* iavf_find_cf - Find the cloud filter in the list
3021 * @adapter: Board private structure
3022 * @cookie: filter specific cookie
3023 *
3024 * Returns ptr to the filter object or NULL. Must be called while holding the
3025 * cloud_filter_list_lock.
3026 */
3027 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3028 unsigned long *cookie)
3029 {
3030 struct iavf_cloud_filter *filter = NULL;
3031
3032 if (!cookie)
3033 return NULL;
3034
3035 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3036 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3037 return filter;
3038 }
3039 return NULL;
3040 }
3041
3042 /**
3043 * iavf_delete_clsflower - Remove tc flower filters
3044 * @adapter: board private structure
3045 * @cls_flower: Pointer to struct flow_cls_offload
3046 */
3047 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3048 struct flow_cls_offload *cls_flower)
3049 {
3050 struct iavf_cloud_filter *filter = NULL;
3051 int err = 0;
3052
3053 spin_lock_bh(&adapter->cloud_filter_list_lock);
3054 filter = iavf_find_cf(adapter, &cls_flower->cookie);
3055 if (filter) {
3056 filter->del = true;
3057 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3058 } else {
3059 err = -EINVAL;
3060 }
3061 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3062
3063 return err;
3064 }
3065
3066 /**
3067 * iavf_setup_tc_cls_flower - flower classifier offloads
3068 * @adapter: board private structure
3069 * @cls_flower: pointer to flow_cls_offload struct with flow info
3070 */
3071 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3072 struct flow_cls_offload *cls_flower)
3073 {
3074 switch (cls_flower->command) {
3075 case FLOW_CLS_REPLACE:
3076 return iavf_configure_clsflower(adapter, cls_flower);
3077 case FLOW_CLS_DESTROY:
3078 return iavf_delete_clsflower(adapter, cls_flower);
3079 case FLOW_CLS_STATS:
3080 return -EOPNOTSUPP;
3081 default:
3082 return -EOPNOTSUPP;
3083 }
3084 }
3085
3086 /**
3087 * iavf_setup_tc_block_cb - block callback for tc
3088 * @type: type of offload
3089 * @type_data: offload data
3090 * @cb_priv: device adapter private data (the iavf_adapter pointer)
3091 *
3092 * This function is the block callback for traffic classes
3093 **/
3094 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3095 void *cb_priv)
3096 {
3097 struct iavf_adapter *adapter = cb_priv;
3098
3099 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3100 return -EOPNOTSUPP;
3101
3102 switch (type) {
3103 case TC_SETUP_CLSFLOWER:
3104 return iavf_setup_tc_cls_flower(adapter, type_data);
3105 default:
3106 return -EOPNOTSUPP;
3107 }
3108 }
3109
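/* flow block callbacks registered via flow_block_cb_setup_simple() in iavf_setup_tc() */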
3110 static LIST_HEAD(iavf_block_cb_list);
3111
3112 /**
3113 * iavf_setup_tc - configure multiple traffic classes
3114 * @netdev: network interface device structure
3115 * @type: type of offload
3116 * @type_data: tc offload data
3117 *
3118 * This function is the callback to ndo_setup_tc in the
3119 * netdev_ops.
3120 *
3121 * Returns 0 on success
3122 **/
3123 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3124 void *type_data)
3125 {
3126 struct iavf_adapter *adapter = netdev_priv(netdev);
3127
3128 switch (type) {
3129 case TC_SETUP_QDISC_MQPRIO:
3130 return __iavf_setup_tc(netdev, type_data);
3131 case TC_SETUP_BLOCK:
3132 return flow_block_cb_setup_simple(type_data,
3133 &iavf_block_cb_list,
3134 iavf_setup_tc_block_cb,
3135 adapter, adapter, true);
3136 default:
3137 return -EOPNOTSUPP;
3138 }
3139 }
3140
3141 /**
3142 * iavf_open - Called when a network interface is made active
3143 * @netdev: network interface device structure
3144 *
3145 * Returns 0 on success, negative value on failure
3146 *
3147 * The open entry point is called when a network interface is made
3148 * active by the system (IFF_UP). At this point all resources needed
3149 * for transmit and receive operations are allocated, the interrupt
3150 * handler is registered with the OS, the watchdog is started,
3151 * and the stack is notified that the interface is ready.
3152 **/
3153 static int iavf_open(struct net_device *netdev)
3154 {
3155 struct iavf_adapter *adapter = netdev_priv(netdev);
3156 int err;
3157
3158 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3159 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3160 return -EIO;
3161 }
3162
3163 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3164 &adapter->crit_section))
3165 usleep_range(500, 1000);
3166
3167 if (adapter->state != __IAVF_DOWN) {
3168 err = -EBUSY;
3169 goto err_unlock;
3170 }
3171
3172 /* allocate transmit descriptors */
3173 err = iavf_setup_all_tx_resources(adapter);
3174 if (err)
3175 goto err_setup_tx;
3176
3177 /* allocate receive descriptors */
3178 err = iavf_setup_all_rx_resources(adapter);
3179 if (err)
3180 goto err_setup_rx;
3181
3182 /* clear any pending interrupts, may auto mask */
3183 err = iavf_request_traffic_irqs(adapter, netdev->name);
3184 if (err)
3185 goto err_req_irq;
3186
3187 spin_lock_bh(&adapter->mac_vlan_list_lock);
3188
3189 iavf_add_filter(adapter, adapter->hw.mac.addr);
3190
3191 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3192
3193 iavf_configure(adapter);
3194
3195 iavf_up_complete(adapter);
3196
3197 iavf_irq_enable(adapter, true);
3198
3199 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3200
3201 return 0;
3202
3203 err_req_irq:
3204 iavf_down(adapter);
3205 iavf_free_traffic_irqs(adapter);
3206 err_setup_rx:
3207 iavf_free_all_rx_resources(adapter);
3208 err_setup_tx:
3209 iavf_free_all_tx_resources(adapter);
3210 err_unlock:
3211 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3212
3213 return err;
3214 }
3215
3216 /**
3217 * iavf_close - Disables a network interface
3218 * @netdev: network interface device structure
3219 *
3220 * Returns 0, this is not allowed to fail
3221 *
3222 * The close entry point is called when an interface is de-activated
3223 * by the OS. The hardware is still under the drivers control, but
3224 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3225 * are freed, along with all transmit and receive resources.
3226 **/
3227 static int iavf_close(struct net_device *netdev)
3228 {
3229 struct iavf_adapter *adapter = netdev_priv(netdev);
3230 int status;
3231
3232 if (adapter->state <= __IAVF_DOWN_PENDING)
3233 return 0;
3234
3235 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3236 &adapter->crit_section))
3237 usleep_range(500, 1000);
3238
3239 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3240 if (CLIENT_ENABLED(adapter))
3241 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3242
3243 iavf_down(adapter);
3244 adapter->state = __IAVF_DOWN_PENDING;
3245 iavf_free_traffic_irqs(adapter);
3246
3247 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3248
3249 /* We explicitly don't free resources here because the hardware is
3250 * still active and can DMA into memory. Resources are cleared in
3251 * iavf_virtchnl_completion() after we get confirmation from the PF
3252 * driver that the rings have been stopped.
3253 *
3254 * Also, we wait for state to transition to __IAVF_DOWN before
3255 * returning. State change occurs in iavf_virtchnl_completion() after
3256 * VF resources are released (which occurs after PF driver processes and
3257 * responds to admin queue commands).
3258 */
3259
3260 status = wait_event_timeout(adapter->down_waitqueue,
3261 adapter->state == __IAVF_DOWN,
3262 msecs_to_jiffies(500));
3263 if (!status)
3264 netdev_warn(netdev, "Device resources not yet released\n");
3265 return 0;
3266 }
3267
3268 /**
3269 * iavf_change_mtu - Change the Maximum Transfer Unit
3270 * @netdev: network interface device structure
3271 * @new_mtu: new value for maximum frame size
3272 *
3273 * Returns 0 on success, negative on failure
3274 **/
3275 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3276 {
3277 struct iavf_adapter *adapter = netdev_priv(netdev);
3278
3279 netdev->mtu = new_mtu;
3280 if (CLIENT_ENABLED(adapter)) {
3281 iavf_notify_client_l2_params(&adapter->vsi);
3282 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3283 }
3284 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3285 queue_work(iavf_wq, &adapter->reset_task);
3286
3287 return 0;
3288 }
3289
3290 /**
3291 * iavf_set_features - set the netdev feature flags
3292 * @netdev: ptr to the netdev being adjusted
3293 * @features: the feature set that the stack is suggesting
3294 * Note: expects to be called while under rtnl_lock()
3295 **/
3296 static int iavf_set_features(struct net_device *netdev,
3297 netdev_features_t features)
3298 {
3299 struct iavf_adapter *adapter = netdev_priv(netdev);
3300
3301 /* Don't allow changing VLAN_RX flag when adapter is not capable
3302 * of VLAN offload
3303 */
3304 if (!VLAN_ALLOWED(adapter)) {
3305 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3306 return -EINVAL;
3307 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3308 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3309 adapter->aq_required |=
3310 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3311 else
3312 adapter->aq_required |=
3313 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3314 }
3315
3316 return 0;
3317 }
3318
3319 /**
3320 * iavf_features_check - Validate encapsulated packet conforms to limits
3321 * @skb: skb buff
3322 * @dev: This physical port's netdev
3323 * @features: Offload features that the stack believes apply
3324 **/
3325 static netdev_features_t iavf_features_check(struct sk_buff *skb,
3326 struct net_device *dev,
3327 netdev_features_t features)
3328 {
3329 size_t len;
3330
3331 /* No point in doing any of this if neither checksum nor GSO are
3332 * being requested for this frame. We can rule out both by just
3333 * checking for CHECKSUM_PARTIAL
3334 */
3335 if (skb->ip_summed != CHECKSUM_PARTIAL)
3336 return features;
3337
3338 /* We cannot support GSO if the MSS is going to be less than
3339 * 64 bytes. If it is then we need to drop support for GSO.
3340 */
3341 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3342 features &= ~NETIF_F_GSO_MASK;
3343
3344 /* MACLEN can support at most 63 words */
3345 len = skb_network_header(skb) - skb->data;
3346 if (len & ~(63 * 2))
3347 goto out_err;
3348
3349 /* IPLEN and EIPLEN can support at most 127 dwords */
3350 len = skb_transport_header(skb) - skb_network_header(skb);
3351 if (len & ~(127 * 4))
3352 goto out_err;
3353
3354 if (skb->encapsulation) {
3355 /* L4TUNLEN can support 127 words */
3356 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3357 if (len & ~(127 * 2))
3358 goto out_err;
3359
3360 /* IPLEN can support at most 127 dwords */
3361 len = skb_inner_transport_header(skb) -
3362 skb_inner_network_header(skb);
3363 if (len & ~(127 * 4))
3364 goto out_err;
3365 }
3366
3367 /* No need to validate L4LEN as TCP is the only protocol with a
3368 * flexible value, and we support all possible values supported
3369 * by TCP, which is at most 15 dwords
3370 */
3371
3372 return features;
3373 out_err:
3374 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3375 }
3376
3377 /**
3378 * iavf_fix_features - fix up the netdev feature bits
3379 * @netdev: our net device
3380 * @features: desired feature bits
3381 *
3382 * Returns fixed-up features bits
3383 **/
3384 static netdev_features_t iavf_fix_features(struct net_device *netdev,
3385 netdev_features_t features)
3386 {
3387 struct iavf_adapter *adapter = netdev_priv(netdev);
3388
3389 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3390 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3391 NETIF_F_HW_VLAN_CTAG_RX |
3392 NETIF_F_HW_VLAN_CTAG_FILTER);
3393
3394 return features;
3395 }
3396
3397 static const struct net_device_ops iavf_netdev_ops = {
3398 .ndo_open = iavf_open,
3399 .ndo_stop = iavf_close,
3400 .ndo_start_xmit = iavf_xmit_frame,
3401 .ndo_set_rx_mode = iavf_set_rx_mode,
3402 .ndo_validate_addr = eth_validate_addr,
3403 .ndo_set_mac_address = iavf_set_mac,
3404 .ndo_change_mtu = iavf_change_mtu,
3405 .ndo_tx_timeout = iavf_tx_timeout,
3406 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
3407 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
3408 .ndo_features_check = iavf_features_check,
3409 .ndo_fix_features = iavf_fix_features,
3410 .ndo_set_features = iavf_set_features,
3411 .ndo_setup_tc = iavf_setup_tc,
3412 };
3413
3414 /**
3415 * iavf_check_reset_complete - check that VF reset is complete
3416 * @hw: pointer to hw struct
3417 *
3418 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3419 **/
3420 static int iavf_check_reset_complete(struct iavf_hw *hw)
3421 {
3422 u32 rstat;
3423 int i;
3424
3425 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3426 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3427 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3428 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3429 (rstat == VIRTCHNL_VFR_COMPLETED))
3430 return 0;
3431 usleep_range(10, 20);
3432 }
3433 return -EBUSY;
3434 }
3435
3436 /**
3437 * iavf_process_config - Process the config information we got from the PF
3438 * @adapter: board private structure
3439 *
3440 * Verify that we have a valid config struct, and set up our netdev features
3441 * and our VSI struct.
3442 **/
3443 int iavf_process_config(struct iavf_adapter *adapter)
3444 {
3445 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3446 int i, num_req_queues = adapter->num_req_queues;
3447 struct net_device *netdev = adapter->netdev;
3448 struct iavf_vsi *vsi = &adapter->vsi;
3449 netdev_features_t hw_enc_features;
3450 netdev_features_t hw_features;
3451
3452 /* got VF config message back from PF, now we can parse it */
3453 for (i = 0; i < vfres->num_vsis; i++) {
3454 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3455 adapter->vsi_res = &vfres->vsi_res[i];
3456 }
3457 if (!adapter->vsi_res) {
3458 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3459 return -ENODEV;
3460 }
3461
3462 if (num_req_queues &&
3463 num_req_queues > adapter->vsi_res->num_queue_pairs) {
3464 /* Problem. The PF gave us fewer queues than what we had
3465 * negotiated in our request. Need a reset to see if we can't
3466 * get back to a working state.
3467 */
3468 dev_err(&adapter->pdev->dev,
3469 "Requested %d queues, but PF only gave us %d.\n",
3470 num_req_queues,
3471 adapter->vsi_res->num_queue_pairs);
3472 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3473 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3474 iavf_schedule_reset(adapter);
3475 return -ENODEV;
3476 }
3477 adapter->num_req_queues = 0;
3478
3479 hw_enc_features = NETIF_F_SG |
3480 NETIF_F_IP_CSUM |
3481 NETIF_F_IPV6_CSUM |
3482 NETIF_F_HIGHDMA |
3483 NETIF_F_SOFT_FEATURES |
3484 NETIF_F_TSO |
3485 NETIF_F_TSO_ECN |
3486 NETIF_F_TSO6 |
3487 NETIF_F_SCTP_CRC |
3488 NETIF_F_RXHASH |
3489 NETIF_F_RXCSUM |
3490 0;
3491
3492 /* advertise to stack only if offloads for encapsulated packets are
3493 * supported
3494 */
3495 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3496 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
3497 NETIF_F_GSO_GRE |
3498 NETIF_F_GSO_GRE_CSUM |
3499 NETIF_F_GSO_IPXIP4 |
3500 NETIF_F_GSO_IPXIP6 |
3501 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3502 NETIF_F_GSO_PARTIAL |
3503 0;
3504
3505 if (!(vfres->vf_cap_flags &
3506 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3507 netdev->gso_partial_features |=
3508 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3509
3510 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3511 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3512 netdev->hw_enc_features |= hw_enc_features;
3513 }
3514 /* record features VLANs can make use of */
3515 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3516
3517 /* Write features and hw_features separately to avoid polluting
3518 * with, or dropping, features that are set when we registered.
3519 */
3520 hw_features = hw_enc_features;
3521
3522 /* Enable VLAN features if supported */
3523 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3524 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3525 NETIF_F_HW_VLAN_CTAG_RX);
3526 /* Enable cloud filter if ADQ is supported */
3527 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3528 hw_features |= NETIF_F_HW_TC;
3529
3530 netdev->hw_features |= hw_features;
3531
3532 netdev->features |= hw_features;
3533
3534 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3535 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3536
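        /* IFF_UNICAST_FLT tells the core that this device can filter
         * additional unicast addresses itself, so the stack does not need
         * to fall back to promiscuous mode when extra unicast addresses
         * are programmed.
         */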
        netdev->priv_flags |= IFF_UNICAST_FLT;

        /* Do not turn on offloads when they are requested to be turned off.
         * TSO needs a minimum of 576 bytes to work correctly.
         */
        if (netdev->wanted_features) {
                if (!(netdev->wanted_features & NETIF_F_TSO) ||
                    netdev->mtu < 576)
                        netdev->features &= ~NETIF_F_TSO;
                if (!(netdev->wanted_features & NETIF_F_TSO6) ||
                    netdev->mtu < 576)
                        netdev->features &= ~NETIF_F_TSO6;
                if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
                        netdev->features &= ~NETIF_F_TSO_ECN;
                if (!(netdev->wanted_features & NETIF_F_GRO))
                        netdev->features &= ~NETIF_F_GRO;
                if (!(netdev->wanted_features & NETIF_F_GSO))
                        netdev->features &= ~NETIF_F_GSO;
        }

        adapter->vsi.id = adapter->vsi_res->vsi_id;

        adapter->vsi.back = adapter;
        adapter->vsi.base_vector = 1;
        adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
        vsi->netdev = adapter->netdev;
        vsi->qs_handle = adapter->vsi_res->qset_handle;
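        /* When the PF owns RSS configuration (VIRTCHNL_VF_OFFLOAD_RSS_PF),
         * use the key and LUT sizes it advertised in the VF resource
         * message; otherwise fall back to the driver's local defaults.
         */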
        if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
                adapter->rss_key_size = vfres->rss_key_size;
                adapter->rss_lut_size = vfres->rss_lut_size;
        } else {
                adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
                adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
        }

        return 0;
}

/**
 * iavf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe. Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * responses back from the PF. Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init. Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
static void iavf_init_task(struct work_struct *work)
{
        struct iavf_adapter *adapter = container_of(work,
                                                    struct iavf_adapter,
                                                    init_task.work);
        struct iavf_hw *hw = &adapter->hw;

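        /* Drive the init state machine: __IAVF_STARTUP ->
         * __IAVF_INIT_VERSION_CHECK -> __IAVF_INIT_GET_RESOURCES. Each
         * helper is expected to advance adapter->state on success, and the
         * task reschedules itself every 30 ms until init either finishes
         * or drops into the error path below.
         */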
        switch (adapter->state) {
        case __IAVF_STARTUP:
                if (iavf_startup(adapter) < 0)
                        goto init_failed;
                break;
        case __IAVF_INIT_VERSION_CHECK:
                if (iavf_init_version_check(adapter) < 0)
                        goto init_failed;
                break;
        case __IAVF_INIT_GET_RESOURCES:
                if (iavf_init_get_resources(adapter) < 0)
                        goto init_failed;
                return;
        default:
                goto init_failed;
        }

        queue_delayed_work(iavf_wq, &adapter->init_task,
                           msecs_to_jiffies(30));
        return;
init_failed:
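        /* Once the failure count exceeds IAVF_AQ_MAX_ERR, assume the PF is
         * not responding: shut down the AdminQ, restart the handshake from
         * __IAVF_STARTUP and back off to a five second retry interval.
         * Otherwise retry the current step after one second.
         */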
        if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
                dev_err(&adapter->pdev->dev,
                        "Failed to communicate with PF; waiting before retry\n");
                adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
                iavf_shutdown_adminq(hw);
                adapter->state = __IAVF_STARTUP;
                queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
                return;
        }
        queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
}

/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct iavf_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (netif_running(netdev))
                iavf_close(netdev);

        /* Prevent the watchdog from running. */
        adapter->state = __IAVF_REMOVE;
        adapter->aq_required = 0;

#ifdef CONFIG_PM
        pci_save_state(pdev);

#endif
        pci_disable_device(pdev);
}

/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct iavf_adapter *adapter = NULL;
        struct iavf_hw *hw = NULL;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

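        /* Prefer a 64-bit DMA mask; if the platform cannot satisfy it,
         * fall back to 32-bit addressing before giving up entirely.
         */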
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "DMA configuration failed: 0x%x\n", err);
                        goto err_dma;
                }
        }

        err = pci_request_regions(pdev, iavf_driver_name);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_request_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }

        pci_enable_pcie_error_reporting(pdev);

        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
                                   IAVF_MAX_REQ_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);

        adapter->netdev = netdev;
        adapter->pdev = pdev;

        hw = &adapter->hw;
        hw->back = adapter;

        adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
        adapter->state = __IAVF_STARTUP;

        /* Call save state here because it relies on the adapter struct. */
        pci_save_state(pdev);

        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
        hw->bus.device = PCI_SLOT(pdev->devfn);
        hw->bus.func = PCI_FUNC(pdev->devfn);
        hw->bus.bus_id = pdev->bus->number;

        /* set up the locks for the AQ, do this only once in probe
         * and destroy them only once in remove
         */
        mutex_init(&hw->aq.asq_mutex);
        mutex_init(&hw->aq.arq_mutex);

        spin_lock_init(&adapter->mac_vlan_list_lock);
        spin_lock_init(&adapter->cloud_filter_list_lock);

        INIT_LIST_HEAD(&adapter->mac_filter_list);
        INIT_LIST_HEAD(&adapter->vlan_filter_list);
        INIT_LIST_HEAD(&adapter->cloud_filter_list);

        INIT_WORK(&adapter->reset_task, iavf_reset_task);
        INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
        INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
        INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
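        /* Stagger the first run of init_task by the low bits of the PCI
         * function number, presumably so that many VFs probing at once do
         * not all start their PF handshake at the same instant.
         */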
        queue_delayed_work(iavf_wq, &adapter->init_task,
                           msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

        /* Set up the wait queue for indicating transition to down status */
        init_waitqueue_head(&adapter->down_waitqueue);

        return 0;

err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * iavf_suspend - Power management suspend routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int __maybe_unused iavf_suspend(struct device *dev_d)
{
        struct net_device *netdev = dev_get_drvdata(dev_d);
        struct iavf_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

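        /* Take the driver's critical-section bit so that we do not tear
         * down rings and interrupts while another task (for example the
         * reset task) is reconfiguring the adapter.
         */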
        while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
                                &adapter->crit_section))
                usleep_range(500, 1000);

        if (netif_running(netdev)) {
                rtnl_lock();
                iavf_down(adapter);
                rtnl_unlock();
        }
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);

        clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

        return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @dev_d: device info pointer
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int __maybe_unused iavf_resume(struct device *dev_d)
{
        struct pci_dev *pdev = to_pci_dev(dev_d);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct iavf_adapter *adapter = netdev_priv(netdev);
        u32 err;

        pci_set_master(pdev);

        rtnl_lock();
        err = iavf_set_interrupt_capability(adapter);
        if (err) {
                rtnl_unlock();
                dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
                return err;
        }
        err = iavf_request_misc_irq(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
                return err;
        }

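        /* Interrupts are back; kick the reset task so the adapter
         * renegotiates its configuration with the PF, and mark the netdev
         * as present again.
         */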
        queue_work(iavf_wq, &adapter->reset_task);

        netif_device_attach(netdev);

        return err;
}

/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_vlan_filter *vlf, *vlftmp;
        struct iavf_mac_filter *f, *ftmp;
        struct iavf_cloud_filter *cf, *cftmp;
        struct iavf_hw *hw = &adapter->hw;
        int err;
        /* Indicate we are in remove so that the reset task does not run */
        set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
        cancel_delayed_work_sync(&adapter->init_task);
        cancel_work_sync(&adapter->reset_task);
        cancel_delayed_work_sync(&adapter->client_task);
        if (adapter->netdev_registered) {
                unregister_netdev(netdev);
                adapter->netdev_registered = false;
        }
        if (CLIENT_ALLOWED(adapter)) {
                err = iavf_lan_del_device(adapter);
                if (err)
                        dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
                                 err);
        }

        /* Shut down all the garbage mashers on the detention level */
        adapter->state = __IAVF_REMOVE;
        adapter->aq_required = 0;
        adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
        iavf_request_reset(adapter);
        msleep(50);
        /* If the FW isn't responding, kick it once, but only once. */
        if (!iavf_asq_done(hw)) {
                iavf_request_reset(adapter);
                msleep(50);
        }
        iavf_free_all_tx_resources(adapter);
        iavf_free_all_rx_resources(adapter);
        iavf_misc_irq_disable(adapter);
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);
        iavf_free_q_vectors(adapter);

        cancel_delayed_work_sync(&adapter->watchdog_task);

        cancel_work_sync(&adapter->adminq_task);

        iavf_free_rss(adapter);

        if (hw->aq.asq.count)
                iavf_shutdown_adminq(hw);

        /* destroy the locks only once, here */
        mutex_destroy(&hw->aq.arq_mutex);
        mutex_destroy(&hw->aq.asq_mutex);

        iounmap(hw->hw_addr);
        pci_release_regions(pdev);
        iavf_free_all_tx_resources(adapter);
        iavf_free_all_rx_resources(adapter);
        iavf_free_queues(adapter);
        kfree(adapter->vf_res);
        spin_lock_bh(&adapter->mac_vlan_list_lock);
        /* If we got removed before an up/down sequence, we've got a filter
         * hanging out there that we need to get rid of.
         */
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                list_del(&f->list);
                kfree(f);
        }
        list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
                                 list) {
                list_del(&vlf->list);
                kfree(vlf);
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
                list_del(&cf->list);
                kfree(cf);
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        free_netdev(netdev);

        pci_disable_pcie_error_reporting(pdev);

        pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);

static struct pci_driver iavf_driver = {
        .name      = iavf_driver_name,
        .id_table  = iavf_pci_tbl,
        .probe     = iavf_probe,
        .remove    = iavf_remove,
        .driver.pm = &iavf_pm_ops,
        .shutdown  = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. It creates the driver workqueue and registers with the PCI
 * subsystem.
 **/
static int __init iavf_init_module(void)
{
        int ret;

        pr_info("iavf: %s\n", iavf_driver_string);

        pr_info("%s\n", iavf_copyright);

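        /* The driver's deferred work items (for example the init and reset
         * tasks queued above) run on this dedicated workqueue;
         * WQ_MEM_RECLAIM guarantees a rescuer thread so the queue can make
         * forward progress under memory pressure.
         */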
        iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
                                  iavf_driver_name);
        if (!iavf_wq) {
                pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
                return -ENOMEM;
        }
        ret = pci_register_driver(&iavf_driver);
        return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
        pci_unregister_driver(&iavf_driver);
        destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */