// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_DEFAULT_RX_COPYBREAK	(256)

#define DEFAULT_MSG_LEVEL	(NETIF_MSG_DRV | NETIF_MSG_LINK)
#define GVE_VERSION		"1.0.0"
#define GVE_VERSION_PREFIX	"GVE-"

const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

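/* ndo_get_stats64 handler: fold the per-ring packet and byte counters into
 * the rtnl stats, re-reading any ring whose u64_stats sequence changed
 * mid-read so readers never see torn values.
 */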
static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	struct gve_priv *priv = netdev_priv(dev);
	unsigned int start;
	int ring;

	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				s->rx_packets += priv->rx[ring].rpackets;
				s->rx_bytes += priv->rx[ring].rbytes;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
		}
	}
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				s->tx_packets += priv->tx[ring].pkt_done;
				s->tx_bytes += priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
		}
	}
}

static int gve_alloc_counter_array(struct gve_priv *priv)
{
	priv->counter_array =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_event_counters *
				   sizeof(*priv->counter_array),
				   &priv->counter_array_bus, GFP_KERNEL);
	if (!priv->counter_array)
		return -ENOMEM;

	return 0;
}

static void gve_free_counter_array(struct gve_priv *priv)
{
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_event_counters *
			  sizeof(*priv->counter_array),
			  priv->counter_array, priv->counter_array_bus);
	priv->counter_array = NULL;
}

/* NIC requests to report stats */
static void gve_stats_report_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     stats_report_task);
	if (gve_get_do_report_stats(priv)) {
		gve_handle_report_stats(priv);
		gve_clear_do_report_stats(priv);
	}
}

static void gve_stats_report_schedule(struct gve_priv *priv)
{
	if (!gve_get_probe_in_progress(priv) &&
	    !gve_get_reset_in_progress(priv)) {
		gve_set_do_report_stats(priv);
		queue_work(priv->gve_wq, &priv->stats_report_task);
	}
}

static void gve_stats_report_timer(struct timer_list *t)
{
	struct gve_priv *priv = from_timer(priv, t, stats_report_timer);

	mod_timer(&priv->stats_report_timer,
		  round_jiffies(jiffies +
		  msecs_to_jiffies(priv->stats_report_timer_period)));
	gve_stats_report_schedule(priv);
}

static int gve_alloc_stats_report(struct gve_priv *priv)
{
	int tx_stats_num, rx_stats_num;

	tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
		       priv->tx_cfg.num_queues;
	rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
		       priv->rx_cfg.num_queues;
	priv->stats_report_len = struct_size(priv->stats_report, stats,
					     tx_stats_num + rx_stats_num);
	priv->stats_report =
		dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
				   &priv->stats_report_bus, GFP_KERNEL);
	if (!priv->stats_report)
		return -ENOMEM;
	/* Set up timer for the report-stats task */
	timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
	priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
	return 0;
}

static void gve_free_stats_report(struct gve_priv *priv)
{
	del_timer_sync(&priv->stats_report_timer);
	dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
			  priv->stats_report, priv->stats_report_bus);
	priv->stats_report = NULL;
}

static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
	struct gve_priv *priv = arg;

	queue_work(priv->gve_wq, &priv->service_task);
	return IRQ_HANDLED;
}

static irqreturn_t gve_intr(int irq, void *arg)
{
	struct gve_notify_block *block = arg;
	struct gve_priv *priv = block->priv;

	iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
	napi_schedule_irqoff(&block->napi);
	return IRQ_HANDLED;
}

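/* NAPI poll: service the TX and/or RX ring behind this notify block. If
 * either ring still has work, return the full budget to stay scheduled;
 * otherwise complete NAPI, ack and unmask the IRQ doorbell, then re-check
 * for work that raced with the unmask and re-mask/reschedule if any did.
 */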
static int gve_napi_poll(struct napi_struct *napi, int budget)
{
	struct gve_notify_block *block;
	__be32 __iomem *irq_doorbell;
	bool reschedule = false;
	struct gve_priv *priv;

	block = container_of(napi, struct gve_notify_block, napi);
	priv = block->priv;

	if (block->tx)
		reschedule |= gve_tx_poll(block, budget);
	if (block->rx)
		reschedule |= gve_rx_poll(block, budget);

	if (reschedule)
		return budget;

	napi_complete(napi);
	irq_doorbell = gve_irq_doorbell(priv, block);
	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);

	/* Double check we have no extra work.
	 * Ensure unmask synchronizes with checking for work.
	 */
	dma_rmb();
	if (block->tx)
		reschedule |= gve_tx_poll(block, -1);
	if (block->rx)
		reschedule |= gve_rx_poll(block, -1);
	if (reschedule && napi_reschedule(napi))
		iowrite32be(GVE_IRQ_MASK, irq_doorbell);

	return 0;
}

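/* Allocate one MSI-X vector per notify block plus one management vector.
 * If the PCI core grants fewer vectors, shrink the notify block count and
 * the TX/RX max queue counts to match, then request the IRQs and spread
 * the per-block affinity hints across the online CPUs.
 */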
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
	int num_vecs_requested = priv->num_ntfy_blks + 1;
	char *name = priv->dev->name;
	unsigned int active_cpus;
	int vecs_enabled;
	int i, j;
	int err;

	priv->msix_vectors = kvzalloc(num_vecs_requested *
				      sizeof(*priv->msix_vectors), GFP_KERNEL);
	if (!priv->msix_vectors)
		return -ENOMEM;
	for (i = 0; i < num_vecs_requested; i++)
		priv->msix_vectors[i].entry = i;
	vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
					     GVE_MIN_MSIX, num_vecs_requested);
	if (vecs_enabled < 0) {
		dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
			GVE_MIN_MSIX, vecs_enabled);
		err = vecs_enabled;
		goto abort_with_msix_vectors;
	}
	if (vecs_enabled != num_vecs_requested) {
		int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
		int vecs_per_type = new_num_ntfy_blks / 2;
		int vecs_left = new_num_ntfy_blks % 2;

		priv->num_ntfy_blks = new_num_ntfy_blks;
		priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
						vecs_per_type);
		priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
						vecs_per_type + vecs_left);
		dev_err(&priv->pdev->dev,
			"Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
			vecs_enabled, priv->tx_cfg.max_queues,
			priv->rx_cfg.max_queues);
		if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
			priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
		if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
			priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	}
	/* Half the notification blocks go to TX and half to RX */
	active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());

	/* Setup Management Vector - the last vector */
	snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
		 name);
	err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
			  gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
	if (err) {
		dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
		goto abort_with_msix_enabled;
	}
	priv->ntfy_blocks =
		dma_alloc_coherent(&priv->pdev->dev,
				   priv->num_ntfy_blks *
				   sizeof(*priv->ntfy_blocks),
				   &priv->ntfy_block_bus, GFP_KERNEL);
	if (!priv->ntfy_blocks) {
		err = -ENOMEM;
		goto abort_with_mgmt_vector;
	}
	/* Setup the other blocks - the first n-1 vectors */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
			 name, i);
		block->priv = priv;
		err = request_irq(priv->msix_vectors[msix_idx].vector,
				  gve_intr, 0, block->name, block);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to receive msix vector %d\n", i);
			goto abort_with_some_ntfy_blocks;
		}
		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      get_cpu_mask(i % active_cpus));
	}
	return 0;
abort_with_some_ntfy_blocks:
	for (j = 0; j < i; j++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[j];
		int msix_idx = j;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
			  sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
abort_with_mgmt_vector:
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
	pci_disable_msix(priv->pdev);
abort_with_msix_vectors:
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
	return err;
}

static void gve_free_notify_blocks(struct gve_priv *priv)
{
	int i;

	/* Free the irqs */
	for (i = 0; i < priv->num_ntfy_blks; i++) {
		struct gve_notify_block *block = &priv->ntfy_blocks[i];
		int msix_idx = i;

		irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
				      NULL);
		free_irq(priv->msix_vectors[msix_idx].vector, block);
	}
	dma_free_coherent(&priv->pdev->dev,
			  priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
			  priv->ntfy_blocks, priv->ntfy_block_bus);
	priv->ntfy_blocks = NULL;
	free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
	pci_disable_msix(priv->pdev);
	kvfree(priv->msix_vectors);
	priv->msix_vectors = NULL;
}

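/* Allocate the event counter array, notify blocks and stats report, then
 * hand their DMA addresses to the device over the admin queue.
 */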
static int gve_setup_device_resources(struct gve_priv *priv)
{
	int err;

	err = gve_alloc_counter_array(priv);
	if (err)
		return err;
	err = gve_alloc_notify_blocks(priv);
	if (err)
		goto abort_with_counter;
	err = gve_alloc_stats_report(priv);
	if (err)
		goto abort_with_ntfy_blocks;
	err = gve_adminq_configure_device_resources(priv,
						    priv->counter_array_bus,
						    priv->num_event_counters,
						    priv->ntfy_block_bus,
						    priv->num_ntfy_blks);
	if (unlikely(err)) {
		dev_err(&priv->pdev->dev,
			"could not setup device_resources: err=%d\n", err);
		err = -ENXIO;
		goto abort_with_stats_report;
	}
	err = gve_adminq_report_stats(priv, priv->stats_report_len,
				      priv->stats_report_bus,
				      GVE_STATS_REPORT_TIMER_PERIOD);
	if (err)
		dev_err(&priv->pdev->dev,
			"Failed to report stats: err=%d\n", err);
	gve_set_device_resources_ok(priv);
	return 0;
abort_with_stats_report:
	gve_free_stats_report(priv);
abort_with_ntfy_blocks:
	gve_free_notify_blocks(priv);
abort_with_counter:
	gve_free_counter_array(priv);
	return err;
}

static void gve_trigger_reset(struct gve_priv *priv);

static void gve_teardown_device_resources(struct gve_priv *priv)
{
	int err;

	/* Tell device its resources are being freed */
	if (gve_get_device_resources_ok(priv)) {
		/* detach the stats report */
		err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Failed to detach stats report: err=%d\n", err);
			gve_trigger_reset(priv);
		}
		err = gve_adminq_deconfigure_device_resources(priv);
		if (err) {
			dev_err(&priv->pdev->dev,
				"Could not deconfigure device resources: err=%d\n",
				err);
			gve_trigger_reset(priv);
		}
	}
	gve_free_counter_array(priv);
	gve_free_notify_blocks(priv);
	gve_free_stats_report(priv);
	gve_clear_device_resources_ok(priv);
}

static void gve_add_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_add(priv->dev, &block->napi, gve_napi_poll,
		       NAPI_POLL_WEIGHT);
}

static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

	netif_napi_del(&block->napi);
}

static int gve_register_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "failed to register queue page list %d\n",
				  priv->qpls[i].id);
			/* This failure will trigger a reset - no need to clean
			 * up
			 */
			return err;
		}
	}
	return 0;
}

static int gve_unregister_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
		/* This failure will trigger a reset - no need to clean up */
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to unregister queue page list %d\n",
				  priv->qpls[i].id);
			return err;
		}
	}
	return 0;
}

static int gve_create_rings(struct gve_priv *priv)
{
	int err;
	int i;

	err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
			  priv->tx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
		  priv->tx_cfg.num_queues);

	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
			  priv->rx_cfg.num_queues);
		/* This failure will trigger a reset - no need to clean
		 * up
		 */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
		  priv->rx_cfg.num_queues);

	/* Rx data ring has been prefilled with packet buffers at queue
	 * allocation time.
	 * Write the doorbell to provide descriptor slots and packet buffers
	 * to the NIC.
	 */
	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_write_doorbell(priv, &priv->rx[i]);

	return 0;
}

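/* Allocate the TX and RX ring arrays and their ring state, then init each
 * queue's stats seqcount and attach NAPI to its notify block.
 */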
static int gve_alloc_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int err;
	int i;

	/* Setup tx rings */
	priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
			    GFP_KERNEL);
	if (!priv->tx)
		return -ENOMEM;
	err = gve_tx_alloc_rings(priv);
	if (err)
		goto free_tx;
	/* Setup rx rings */
	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
			    GFP_KERNEL);
	if (!priv->rx) {
		err = -ENOMEM;
		goto free_tx_queue;
	}
	err = gve_rx_alloc_rings(priv);
	if (err)
		goto free_rx;
	/* Add tx napi & init sync stats*/
	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		u64_stats_init(&priv->tx[i].statss);
		ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
		gve_add_napi(priv, ntfy_idx);
	}
	/* Add rx napi & init sync stats*/
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		u64_stats_init(&priv->rx[i].statss);
		ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
		gve_add_napi(priv, ntfy_idx);
	}

	return 0;

free_rx:
	kvfree(priv->rx);
	priv->rx = NULL;
free_tx_queue:
	gve_tx_free_rings(priv);
free_tx:
	kvfree(priv->tx);
	priv->tx = NULL;
	return err;
}

static int gve_destroy_rings(struct gve_priv *priv)
{
	int err;

	err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy tx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
	err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err) {
		netif_err(priv, drv, priv->dev,
			  "failed to destroy rx queues\n");
		/* This failure will trigger a reset - no need to clean up */
		return err;
	}
	netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
	return 0;
}

static void gve_free_rings(struct gve_priv *priv)
{
	int ntfy_idx;
	int i;

	if (priv->tx) {
		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_tx_free_rings(priv);
		kvfree(priv->tx);
		priv->tx = NULL;
	}
	if (priv->rx) {
		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
			gve_remove_napi(priv, ntfy_idx);
		}
		gve_rx_free_rings(priv);
		kvfree(priv->rx);
		priv->rx = NULL;
	}
}

int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction dir)
{
	*page = alloc_page(GFP_KERNEL);
	if (!*page) {
		priv->page_alloc_fail++;
		return -ENOMEM;
	}
	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
	if (dma_mapping_error(dev, *dma)) {
		priv->dma_mapping_error++;
		put_page(*page);
		return -ENOMEM;
	}
	return 0;
}

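/* Allocate and DMA-map the pages backing one queue page list, honoring the
 * device's registered-page limit. On failure the caller unwinds whatever
 * was partially built.
 */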
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
				     int pages)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (pages + priv->num_registered_pages > priv->max_registered_pages) {
		netif_err(priv, drv, priv->dev,
			  "Reached max number of registered pages %llu > %llu\n",
			  pages + priv->num_registered_pages,
			  priv->max_registered_pages);
		return -EINVAL;
	}

	qpl->id = id;
	qpl->num_entries = 0;
	qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->pages)
		return -ENOMEM;
	qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
				   GFP_KERNEL);
	/* caller handles clean up */
	if (!qpl->page_buses)
		return -ENOMEM;

	for (i = 0; i < pages; i++) {
		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
				     &qpl->page_buses[i],
				     gve_qpl_dma_dir(priv, id));
		/* caller handles clean up */
		if (err)
			return -ENOMEM;
		qpl->num_entries++;
	}
	priv->num_registered_pages += pages;

	return 0;
}

void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction dir)
{
	if (!dma_mapping_error(dev, dma))
		dma_unmap_page(dev, dma, PAGE_SIZE, dir);
	if (page)
		put_page(page);
}

static void gve_free_queue_page_list(struct gve_priv *priv,
				     int id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int i;

	if (!qpl->pages)
		return;
	if (!qpl->page_buses)
		goto free_pages;

	for (i = 0; i < qpl->num_entries; i++)
		gve_free_page(&priv->pdev->dev, qpl->pages[i],
			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));

	kvfree(qpl->page_buses);
free_pages:
	kvfree(qpl->pages);
	priv->num_registered_pages -= qpl->num_entries;
}

static int gve_alloc_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i, j;
	int err;

	priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
	if (!priv->qpls)
		return -ENOMEM;

	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
		err = gve_alloc_queue_page_list(priv, i,
						priv->tx_pages_per_qpl);
		if (err)
			goto free_qpls;
	}
	for (; i < num_qpls; i++) {
		err = gve_alloc_queue_page_list(priv, i,
						priv->rx_pages_per_qpl);
		if (err)
			goto free_qpls;
	}

	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
				     sizeof(unsigned long) * BITS_PER_BYTE;
	priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
					    sizeof(unsigned long), GFP_KERNEL);
	if (!priv->qpl_cfg.qpl_id_map) {
		err = -ENOMEM;
		goto free_qpls;
	}

	return 0;

free_qpls:
	for (j = 0; j <= i; j++)
		gve_free_queue_page_list(priv, j);
	kvfree(priv->qpls);
	return err;
}

static void gve_free_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i;

	kvfree(priv->qpl_cfg.qpl_id_map);

	for (i = 0; i < num_qpls; i++)
		gve_free_queue_page_list(priv, i);

	kvfree(priv->qpls);
}

/* Use this to schedule a reset when the device is capable of continuing
 * to handle other requests in its current state. If it is not, do a reset
 * in thread instead.
 */
void gve_schedule_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);

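/* ndo_open: allocate queue page lists and rings, size the real TX/RX queue
 * counts, register the QPLs and create the rings on the device, then start
 * the stats timer and turn the data path up. Failures after the device has
 * been touched fall through to a reset rather than a plain unwind.
 */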
static int gve_open(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	err = gve_alloc_qpls(priv);
	if (err)
		return err;
	err = gve_alloc_rings(priv);
	if (err)
		goto free_qpls;

	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
	if (err)
		goto free_rings;
	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
	if (err)
		goto free_rings;

	err = gve_register_qpls(priv);
	if (err)
		goto reset;
	err = gve_create_rings(priv);
	if (err)
		goto reset;
	gve_set_device_rings_ok(priv);

	if (gve_get_report_stats(priv))
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));

	gve_turnup(priv);
	queue_work(priv->gve_wq, &priv->service_task);
	priv->interface_up_cnt++;
	return 0;

free_rings:
	gve_free_rings(priv);
free_qpls:
	gve_free_qpls(priv);
	return err;

reset:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	/* if this fails there is nothing we can do so just ignore the return */
	gve_reset_recovery(priv, false);
	/* return the original error */
	return err;
}

static int gve_close(struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);
	if (gve_get_device_rings_ok(priv)) {
		gve_turndown(priv);
		err = gve_destroy_rings(priv);
		if (err)
			goto err;
		err = gve_unregister_qpls(priv);
		if (err)
			goto err;
		gve_clear_device_rings_ok(priv);
	}
	del_timer_sync(&priv->stats_report_timer);

	gve_free_rings(priv);
	gve_free_qpls(priv);
	priv->interface_down_cnt++;
	return 0;

err:
	/* This must have been called from a reset due to the rtnl lock
	 * so just return at this point.
	 */
	if (gve_get_reset_in_progress(priv))
		return err;
	/* Otherwise reset before returning */
	gve_reset_and_teardown(priv, true);
	return gve_reset_recovery(priv, false);
}

int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config)
{
	int err;

	if (netif_carrier_ok(priv->dev)) {
		/* To make this process as simple as possible we teardown the
		 * device, set the new configuration, and then bring the device
		 * up again.
		 */
		err = gve_close(priv->dev);
		/* we have already tried to reset in close,
		 * just fail at this point
		 */
		if (err)
			return err;
		priv->tx_cfg = new_tx_config;
		priv->rx_cfg = new_rx_config;

		err = gve_open(priv->dev);
		if (err)
			goto err;

		return 0;
	}
	/* Set the config for the next up. */
	priv->tx_cfg = new_tx_config;
	priv->rx_cfg = new_rx_config;

	return 0;
err:
	netif_err(priv, drv, priv->dev,
		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}

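/* Quiesce the data path: drop the carrier, disable NAPI on every notify
 * block and stop the TX queues so no new work reaches the device.
 */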
static void gve_turndown(struct gve_priv *priv)
{
	int idx;

	if (netif_carrier_ok(priv->dev))
		netif_carrier_off(priv->dev);

	if (!gve_get_napi_enabled(priv))
		return;

	/* Disable napi to prevent more work from coming in */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_disable(&block->napi);
	}

	/* Stop tx queues */
	netif_tx_disable(priv->dev);

	gve_clear_napi_enabled(priv);
	gve_clear_report_stats(priv);
}

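/* Bring the data path back up: start the TX queues, enable NAPI on every
 * notify block and unmask its interrupt by writing 0 to the IRQ doorbell.
 */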
static void gve_turnup(struct gve_priv *priv)
{
	int idx;

	/* Start the tx queues */
	netif_tx_start_all_queues(priv->dev);

	/* Enable napi and unmask interrupts for all queues */
	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		iowrite32be(0, gve_irq_doorbell(priv, block));
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
		struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

		napi_enable(&block->napi);
		iowrite32be(0, gve_irq_doorbell(priv, block));
	}

	gve_set_napi_enabled(priv);
}

static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gve_priv *priv = netdev_priv(dev);

	gve_schedule_reset(priv);
	priv->tx_timeo_cnt++;
}

static const struct net_device_ops gve_netdev_ops = {
	.ndo_start_xmit		= gve_tx,
	.ndo_open		= gve_open,
	.ndo_stop		= gve_close,
	.ndo_get_stats64	= gve_get_stats,
	.ndo_tx_timeout		= gve_tx_timeout,
};

static void gve_handle_status(struct gve_priv *priv, u32 status)
{
	if (GVE_DEVICE_STATUS_RESET_MASK & status) {
		dev_info(&priv->pdev->dev, "Device requested reset.\n");
		gve_set_do_reset(priv);
	}
	if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
		priv->stats_report_trigger_cnt++;
		gve_set_do_report_stats(priv);
	}
}

static void gve_handle_reset(struct gve_priv *priv)
{
	/* A service task will be scheduled at the end of probe to catch any
	 * resets that need to happen, and we don't want to reset until
	 * probe is done.
	 */
	if (gve_get_probe_in_progress(priv))
		return;

	if (gve_get_do_reset(priv)) {
		rtnl_lock();
		gve_reset(priv, false);
		rtnl_unlock();
	}
}

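/* Fill in the DMA'd stats report the NIC asked for: bump written_count and
 * emit one entry per TX/RX queue for each stat the report carries.
 */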
void gve_handle_report_stats(struct gve_priv *priv)
{
	struct stats *stats = priv->stats_report->stats;
	int idx, stats_idx = 0;
	unsigned int start = 0;
	u64 tx_bytes;

	if (!gve_get_report_stats(priv))
		return;

	be64_add_cpu(&priv->stats_report->written_count, 1);
	/* tx stats */
	if (priv->tx) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			do {
				start = u64_stats_fetch_begin(&priv->tx[idx].statss);
				tx_bytes = priv->tx[idx].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_WAKE_CNT),
				.value = cpu_to_be64(priv->tx[idx].wake_queue),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_STOP_CNT),
				.value = cpu_to_be64(priv->tx[idx].stop_queue),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_FRAMES_SENT),
				.value = cpu_to_be64(priv->tx[idx].req),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_BYTES_SENT),
				.value = cpu_to_be64(tx_bytes),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
				.value = cpu_to_be64(priv->tx[idx].done),
				.queue_id = cpu_to_be32(idx),
			};
		}
	}
	/* rx stats */
	if (priv->rx) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
				.value = cpu_to_be64(priv->rx[idx].desc.seqno),
				.queue_id = cpu_to_be32(idx),
			};
			stats[stats_idx++] = (struct stats) {
				.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
				.value = cpu_to_be64(priv->rx[idx].fill_cnt),
				.queue_id = cpu_to_be32(idx),
			};
		}
	}
}

static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
	if (!gve_get_napi_enabled(priv))
		return;

	if (link_status == netif_carrier_ok(priv->dev))
		return;

	if (link_status) {
		netdev_info(priv->dev, "Device link is up.\n");
		netif_carrier_on(priv->dev);
	} else {
		netdev_info(priv->dev, "Device link is down.\n");
		netif_carrier_off(priv->dev);
	}
}

/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
	struct gve_priv *priv = container_of(work, struct gve_priv,
					     service_task);
	u32 status = ioread32be(&priv->reg_bar0->device_status);

	gve_handle_status(priv, status);

	gve_handle_reset(priv);
	gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}

static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
	int num_ntfy;
	int err;

	/* Set up the adminq */
	err = gve_adminq_alloc(&priv->pdev->dev, priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Failed to alloc admin queue: err=%d\n", err);
		return err;
	}

	if (skip_describe_device)
		goto setup_device;

	/* Get the initial information we need from the device */
	err = gve_adminq_describe_device(priv);
	if (err) {
		dev_err(&priv->pdev->dev,
			"Could not get device information: err=%d\n", err);
		goto err;
	}
	if (priv->dev->max_mtu > PAGE_SIZE) {
		priv->dev->max_mtu = PAGE_SIZE;
		err = gve_adminq_set_mtu(priv, priv->dev->mtu);
		if (err) {
			dev_err(&priv->pdev->dev, "Could not set mtu");
			goto err;
		}
	}
	priv->dev->mtu = priv->dev->max_mtu;
	num_ntfy = pci_msix_vec_count(priv->pdev);
	if (num_ntfy <= 0) {
		dev_err(&priv->pdev->dev,
			"could not count MSI-x vectors: err=%d\n", num_ntfy);
		err = num_ntfy;
		goto err;
	} else if (num_ntfy < GVE_MIN_MSIX) {
		dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
			GVE_MIN_MSIX, num_ntfy);
		err = -EINVAL;
		goto err;
	}

	priv->num_registered_pages = 0;
	priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
	/* gvnic has one Notification Block per MSI-x vector, except for the
	 * management vector
	 */
	priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
	priv->mgmt_msix_idx = priv->num_ntfy_blks;

	priv->tx_cfg.max_queues =
		min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
	priv->rx_cfg.max_queues =
		min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);

	priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
	priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
	if (priv->default_num_queues > 0) {
		priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->tx_cfg.num_queues);
		priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
						priv->rx_cfg.num_queues);
	}

	dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
		 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
	dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
		 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);

setup_device:
	err = gve_setup_device_resources(priv);
	if (!err)
		return 0;
err:
	gve_adminq_free(&priv->pdev->dev, priv);
	return err;
}

static void gve_teardown_priv_resources(struct gve_priv *priv)
{
	gve_teardown_device_resources(priv);
	gve_adminq_free(&priv->pdev->dev, priv);
}

static void gve_trigger_reset(struct gve_priv *priv)
{
	/* Reset the device by releasing the AQ */
	gve_adminq_release(priv);
}

static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
{
	gve_trigger_reset(priv);
	/* With the reset having already happened, close cannot fail */
	if (was_up)
		gve_close(priv->dev);
	gve_teardown_priv_resources(priv);
}

static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
{
	int err;

	err = gve_init_priv(priv, true);
	if (err)
		goto err;
	if (was_up) {
		err = gve_open(priv->dev);
		if (err)
			goto err;
	}
	return 0;
err:
	dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
	gve_turndown(priv);
	return err;
}

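/* Full device reset: quiesce and tear down, either by closing cleanly
 * (attempt_teardown) or by releasing the admin queue right away, then
 * rebuild priv state and reopen the interface if it was up.
 */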
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
	bool was_up = netif_carrier_ok(priv->dev);
	int err;

	dev_info(&priv->pdev->dev, "Performing reset\n");
	gve_clear_do_reset(priv);
	gve_set_reset_in_progress(priv);
	/* If we aren't attempting to teardown normally, just go turndown and
	 * reset right away.
	 */
	if (!attempt_teardown) {
		gve_turndown(priv);
		gve_reset_and_teardown(priv, was_up);
	} else {
		/* Otherwise attempt to close normally */
		if (was_up) {
			err = gve_close(priv->dev);
			/* If that fails reset as we did above */
			if (err)
				gve_reset_and_teardown(priv, was_up);
		}
		/* Clean up any remaining resources */
		gve_teardown_priv_resources(priv);
	}

	/* Set it all back up */
	err = gve_reset_recovery(priv, was_up);
	gve_clear_reset_in_progress(priv);
	priv->reset_cnt++;
	priv->interface_up_cnt = 0;
	priv->interface_down_cnt = 0;
	priv->stats_report_trigger_cnt = 0;
	return err;
}

static void gve_write_version(u8 __iomem *driver_version_register)
{
	const char *c = gve_version_prefix;

	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}

	c = gve_version_str;
	while (*c) {
		writeb(*c, driver_version_register);
		c++;
	}
	writeb('\n', driver_version_register);
}

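/* PCI probe: enable the device, map the register and doorbell BARs, write
 * the driver version, allocate the netdev sized to the advertised queue
 * maxima, set up the admin queue and device resources, and register the
 * netdev.
 */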
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int max_tx_queues, max_rx_queues;
	struct net_device *dev;
	__be32 __iomem *db_bar;
	struct gve_registers __iomem *reg_bar;
	struct gve_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return -ENXIO;

	err = pci_request_regions(pdev, "gvnic-cfg");
	if (err)
		goto abort_with_enabled;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"Failed to set consistent dma mask: err=%d\n", err);
		goto abort_with_pci_region;
	}

	reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
	if (!reg_bar) {
		dev_err(&pdev->dev, "Failed to map pci bar!\n");
		err = -ENOMEM;
		goto abort_with_pci_region;
	}

	db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
	if (!db_bar) {
		dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
		err = -ENOMEM;
		goto abort_with_reg_bar;
	}

	gve_write_version(&reg_bar->driver_version);
	/* Get max queues to alloc etherdev */
	max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
	max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
	/* Alloc and setup the netdev and priv */
	dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
	if (!dev) {
		dev_err(&pdev->dev, "could not allocate netdev\n");
		goto abort_with_db_bar;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);
	dev->ethtool_ops = &gve_ethtool_ops;
	dev->netdev_ops = &gve_netdev_ops;
	/* advertise features */
	dev->hw_features = NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_TSO;
	dev->hw_features |= NETIF_F_TSO6;
	dev->hw_features |= NETIF_F_TSO_ECN;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXHASH;
	dev->features = dev->hw_features;
	dev->watchdog_timeo = 5 * HZ;
	dev->min_mtu = ETH_MIN_MTU;
	netif_carrier_off(dev);

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->pdev = pdev;
	priv->msg_enable = DEFAULT_MSG_LEVEL;
	priv->reg_bar0 = reg_bar;
	priv->db_bar2 = db_bar;
	priv->service_task_flags = 0x0;
	priv->state_flags = 0x0;
	priv->ethtool_flags = 0x0;

	gve_set_probe_in_progress(priv);
	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
	if (!priv->gve_wq) {
		dev_err(&pdev->dev, "Could not allocate workqueue");
		err = -ENOMEM;
		goto abort_with_netdev;
	}
	INIT_WORK(&priv->service_task, gve_service_task);
	INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
	priv->tx_cfg.max_queues = max_tx_queues;
	priv->rx_cfg.max_queues = max_rx_queues;

	err = gve_init_priv(priv, false);
	if (err)
		goto abort_with_wq;

	err = register_netdev(dev);
	if (err)
		goto abort_with_wq;

	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
	gve_clear_probe_in_progress(priv);
	queue_work(priv->gve_wq, &priv->service_task);
	return 0;

abort_with_wq:
	destroy_workqueue(priv->gve_wq);

abort_with_netdev:
	free_netdev(dev);

abort_with_db_bar:
	pci_iounmap(pdev, db_bar);

abort_with_reg_bar:
	pci_iounmap(pdev, reg_bar);

abort_with_pci_region:
	pci_release_regions(pdev);

abort_with_enabled:
	pci_disable_device(pdev);
	return -ENXIO;
}

static void gve_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct gve_priv *priv = netdev_priv(netdev);
	__be32 __iomem *db_bar = priv->db_bar2;
	void __iomem *reg_bar = priv->reg_bar0;

	unregister_netdev(netdev);
	gve_teardown_priv_resources(priv);
	destroy_workqueue(priv->gve_wq);
	free_netdev(netdev);
	pci_iounmap(pdev, db_bar);
	pci_iounmap(pdev, reg_bar);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id gve_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
	{ }
};

static struct pci_driver gvnic_driver = {
	.name		= "gvnic",
	.id_table	= gve_id_table,
	.probe		= gve_probe,
	.remove		= gve_remove,
};

module_pci_driver(gvnic_driver);

MODULE_DEVICE_TABLE(pci, gve_id_table);
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("gVNIC Driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_VERSION(GVE_VERSION);