// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
#include "octep_pfvf_mbox.h"

#define OCTEP_INTR_POLL_TIME_MSECS 100
struct workqueue_struct *octep_wq;

/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)},
	{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);

MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
MODULE_DESCRIPTION(OCTEP_DRV_STRING);
MODULE_LICENSE("GPL");

/**
 * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate resources to hold per Tx/Rx queue interrupt info.
 * This is the information passed to the interrupt handler, from which NAPI
 * poll is scheduled; it includes quick access to the private data of the
 * Tx/Rx queue corresponding to the interrupt being handled.
 *
 * Return: 0, on successful allocation of resources for all queue interrupts.
 *         -1, if failed to allocate any resource.
 */
static int octep_alloc_ioq_vectors(struct octep_device *oct)
{
	int i;
	struct octep_ioq_vector *ioq_vector;

	for (i = 0; i < oct->num_oqs; i++) {
		oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
		if (!oct->ioq_vector[i])
			goto free_ioq_vector;

		ioq_vector = oct->ioq_vector[i];
		ioq_vector->iq = oct->iq[i];
		ioq_vector->oq = oct->oq[i];
		ioq_vector->octep_dev = oct;
	}

	dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
	return 0;

free_ioq_vector:
	while (i) {
		i--;
		vfree(oct->ioq_vector[i]);
		oct->ioq_vector[i] = NULL;
	}
	return -1;
}

/**
 * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_free_ioq_vectors(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		if (oct->ioq_vector[i]) {
			vfree(oct->ioq_vector[i]);
			oct->ioq_vector[i] = NULL;
		}
	}
	netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}

/**
 * octep_enable_msix_range() - enable MSI-x interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
 * for the Octeon device.
 *
 * Return: 0, on successfully enabling all MSI-x interrupts.
 *         -1, if failed to enable any MSI-x interrupt.
 */
static int octep_enable_msix_range(struct octep_device *oct)
{
	int num_msix, msix_allocated;
	int i;

	/* Generic interrupts apart from input/output queues */
	num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
	oct->msix_entries = kcalloc(num_msix,
				    sizeof(struct msix_entry), GFP_KERNEL);
	if (!oct->msix_entries)
		goto msix_alloc_err;

	for (i = 0; i < num_msix; i++)
		oct->msix_entries[i].entry = i;

	msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
					       num_msix, num_msix);
	if (msix_allocated != num_msix) {
		dev_err(&oct->pdev->dev,
			"Failed to enable %d msix irqs; got only %d\n",
			num_msix, msix_allocated);
		goto enable_msix_err;
	}
	oct->num_irqs = msix_allocated;
	dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");

	return 0;

enable_msix_err:
	if (msix_allocated > 0)
		pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
msix_alloc_err:
	return -1;
}

/**
 * octep_disable_msix() - disable MSI-x interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Disable MSI-x on the Octeon device.
 */
static void octep_disable_msix(struct octep_device *oct)
{
	pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
	dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
}

/**
 * octep_mbox_intr_handler() - common handler for pfvf mbox interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for PF-VF mailbox interrupts.
 */
static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.mbox_intr_handler(oct);
}

/**
 * octep_oei_intr_handler() - common handler for output endpoint interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for all output endpoint interrupts.
 */
static irqreturn_t octep_oei_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.oei_intr_handler(oct);
}

/**
 * octep_ire_intr_handler() - common handler for input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for input ring error interrupts.
 */
static irqreturn_t octep_ire_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.ire_intr_handler(oct);
}

/**
 * octep_ore_intr_handler() - common handler for output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for output ring error interrupts.
 */
static irqreturn_t octep_ore_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.ore_intr_handler(oct);
}

/**
 * octep_vfire_intr_handler() - common handler for vf input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for VF input ring error interrupts.
 */
static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.vfire_intr_handler(oct);
}

/**
 * octep_vfore_intr_handler() - common handler for vf output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for VF output ring error interrupts.
 */
static irqreturn_t octep_vfore_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.vfore_intr_handler(oct);
}

/**
 * octep_dma_intr_handler() - common handler for dpi dma related interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for DPI DMA related interrupts.
 */
static irqreturn_t octep_dma_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.dma_intr_handler(oct);
}

/**
 * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for DPI DMA transaction error interrupts for VFs.
 */
static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.dma_vf_intr_handler(oct);
}

/**
 * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for PP transaction error interrupts for VFs.
 */
static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.pp_vf_intr_handler(oct);
}

/**
 * octep_misc_intr_handler() - common handler for mac related interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for MAC related interrupts.
 */
static irqreturn_t octep_misc_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.misc_intr_handler(oct);
}

/**
 * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use).
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for all reserved interrupts.
 */
static irqreturn_t octep_rsvd_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.rsvd_intr_handler(oct);
}

/**
 * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data contains pointers to Tx/Rx queue private data
 *        and corresponding NAPI context.
 *
 * This is the common handler for all Tx/Rx queue interrupts.
 */
static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
{
	struct octep_ioq_vector *ioq_vector = data;
	struct octep_device *oct = ioq_vector->octep_dev;

	return oct->hw_ops.ioq_intr_handler(ioq_vector);
}

/**
 * octep_request_irqs() - Register interrupt handlers.
 *
 * @oct: Octeon device private data structure.
 *
 * Register handlers for all queue and non-queue interrupts.
 *
 * Return: 0, on successful registration of all interrupt handlers.
 *         -1, on any error.
 */
static int octep_request_irqs(struct octep_device *oct)
{
	struct net_device *netdev = oct->netdev;
	struct octep_ioq_vector *ioq_vector;
	struct msix_entry *msix_entry;
	char **non_ioq_msix_names;
	int num_non_ioq_msix;
	int ret, i, j;

	num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
	non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);

	oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
					 OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
	if (!oct->non_ioq_irq_names)
		goto alloc_err;

	/* First few MSI-X interrupts are non-queue interrupts */
	for (i = 0; i < num_non_ioq_msix; i++) {
		char *irq_name;

		irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
		msix_entry = &oct->msix_entries[i];

		snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
			 "%s-%s", netdev->name, non_ioq_msix_names[i]);
		if (!strncmp(non_ioq_msix_names[i], "epf_mbox_rint", strlen("epf_mbox_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_mbox_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
				    strlen("epf_oei_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_oei_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint",
				    strlen("epf_ire_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_ire_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint",
				    strlen("epf_ore_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_ore_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint",
				    strlen("epf_vfire_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_vfire_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint",
				    strlen("epf_vfore_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_vfore_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint",
				    strlen("epf_dma_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_dma_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint",
				    strlen("epf_dma_vf_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_dma_vf_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint",
				    strlen("epf_pp_vf_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_pp_vf_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint",
				    strlen("epf_misc_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_misc_intr_handler, 0,
					  irq_name, oct);
		} else {
			ret = request_irq(msix_entry->vector,
					  octep_rsvd_intr_handler, 0,
					  irq_name, oct);
		}

		if (ret) {
			netdev_err(netdev,
				   "request_irq failed for %s; err=%d",
				   irq_name, ret);
			goto non_ioq_irq_err;
		}
	}

	/* Request IRQs for Tx/Rx queues */
	for (j = 0; j < oct->num_oqs; j++) {
		ioq_vector = oct->ioq_vector[j];
		msix_entry = &oct->msix_entries[j + num_non_ioq_msix];

		snprintf(ioq_vector->name, sizeof(ioq_vector->name),
			 "%s-q%d", netdev->name, j);
		ret = request_irq(msix_entry->vector,
				  octep_ioq_intr_handler, 0,
				  ioq_vector->name, ioq_vector);
		if (ret) {
			netdev_err(netdev,
				   "request_irq failed for Q-%d; err=%d",
				   j, ret);
			goto ioq_irq_err;
		}

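		/* Pin each queue vector to an online CPU (round-robin) and
		 * publish it as the IRQ affinity hint.
		 */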
		cpumask_set_cpu(j % num_online_cpus(),
				&ioq_vector->affinity_mask);
		irq_set_affinity_hint(msix_entry->vector,
				      &ioq_vector->affinity_mask);
	}

	return 0;
ioq_irq_err:
	while (j) {
		--j;
		ioq_vector = oct->ioq_vector[j];
		msix_entry = &oct->msix_entries[j + num_non_ioq_msix];

		irq_set_affinity_hint(msix_entry->vector, NULL);
		free_irq(msix_entry->vector, ioq_vector);
	}
non_ioq_irq_err:
	while (i) {
		--i;
		free_irq(oct->msix_entries[i].vector, oct);
	}
	kfree(oct->non_ioq_irq_names);
	oct->non_ioq_irq_names = NULL;
alloc_err:
	return -1;
}

/**
 * octep_free_irqs() - free all registered interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Free all queue and non-queue interrupts of the Octeon device.
 */
static void octep_free_irqs(struct octep_device *oct)
{
	int i;

	/* First few MSI-X interrupts are non-queue interrupts; free them */
	for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
		free_irq(oct->msix_entries[i].vector, oct);
	kfree(oct->non_ioq_irq_names);

	/* Free IRQs for Input/Output (Tx/Rx) queues */
	for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector,
			 oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
	}
	netdev_info(oct->netdev, "IRQs freed\n");
}

/**
 * octep_setup_irqs() - setup interrupts for the Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate data structures to hold per-interrupt information, allocate/enable
 * MSI-x interrupts and register interrupt handlers.
 *
 * Return: 0, on successful allocation and registration of all interrupts.
 *         -1, on any error.
 */
static int octep_setup_irqs(struct octep_device *oct)
{
	if (octep_alloc_ioq_vectors(oct))
		goto ioq_vector_err;

	if (octep_enable_msix_range(oct))
		goto enable_msix_err;

	if (octep_request_irqs(oct))
		goto request_irq_err;

	return 0;

request_irq_err:
	octep_disable_msix(oct);
enable_msix_err:
	octep_free_ioq_vectors(oct);
ioq_vector_err:
	return -1;
}

/**
 * octep_clean_irqs() - free all interrupts and their resources.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_clean_irqs(struct octep_device *oct)
{
	octep_free_irqs(oct);
	octep_disable_msix(oct);
	octep_free_ioq_vectors(oct);
}

/**
 * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
 *
 * @iq: Octeon Tx queue data structure.
 * @oq: Octeon Rx queue data structure.
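 *
 * Acknowledge the Tx/Rx completions counted so far, then set the RESEND
 * bits so the hardware re-arms the queue interrupt.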
 */
static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
{
	u32 pkts_pend = oq->pkts_pending;

	netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
	if (iq->pkts_processed) {
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
	}
	if (oq->last_pkt_count - pkts_pend) {
		writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
		oq->last_pkt_count = pkts_pend;
	}

	/* Flush the previous writes before writing to the RESEND bit */
	wmb();
	writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
	writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}

/**
 * octep_napi_poll() - NAPI poll function for Tx/Rx.
 *
 * @napi: pointer to napi context.
 * @budget: max number of packets to be processed in single invocation.
 */
static int octep_napi_poll(struct napi_struct *napi, int budget)
{
	struct octep_ioq_vector *ioq_vector =
		container_of(napi, struct octep_ioq_vector, napi);
	u32 tx_pending, rx_done;

	tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
	rx_done = octep_oq_process_rx(ioq_vector->oq, budget);

	/* need more polling if Tx completion processing is still pending or
	 * we processed at least 'budget' Rx packets.
	 */
	if (tx_pending || rx_done >= budget)
		return budget;

	napi_complete(napi);
	octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
	return rx_done;
}

/**
 * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_add(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
			       octep_napi_poll);
		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
	}
}

/**
 * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_delete(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
		netif_napi_del(&oct->ioq_vector[i]->napi);
		oct->oq[i]->napi = NULL;
	}
}

/**
 * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_enable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
		napi_enable(&oct->ioq_vector[i]->napi);
	}
}

/**
 * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_disable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
		napi_disable(&oct->ioq_vector[i]->napi);
	}
}

static void octep_link_up(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);
}

/**
 * octep_open() - start the octeon network device.
 *
 * @netdev: pointer to kernel network device.
 *
 * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
 * Tx/Rx queues and interrupts.
 *
 * Return: 0, on successfully setting up the device and bringing it up.
 *         -1, on any error.
 */
static int octep_open(struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);
	int err, ret;

	netdev_info(netdev, "Starting netdev ...\n");
	netif_carrier_off(netdev);

	oct->hw_ops.reset_io_queues(oct);

	if (octep_setup_iqs(oct))
		goto setup_iq_err;
	if (octep_setup_oqs(oct))
		goto setup_oq_err;
	if (octep_setup_irqs(oct))
		goto setup_irq_err;

	err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
	if (err)
		goto set_queues_err;
	err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
	if (err)
		goto set_queues_err;

	octep_napi_add(oct);
	octep_napi_enable(oct);

	oct->link_info.admin_up = 1;
	octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
				    false);
	octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
				       false);
	oct->poll_non_ioq_intr = false;

	/* Enable the input and output queues for this Octeon device */
	oct->hw_ops.enable_io_queues(oct);

	/* Enable Octeon device interrupts */
	oct->hw_ops.enable_interrupts(oct);

	octep_oq_dbell_init(oct);

	ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID);
	if (ret > 0)
		octep_link_up(netdev);

	return 0;

set_queues_err:
	octep_clean_irqs(oct);
setup_irq_err:
	octep_free_oqs(oct);
setup_oq_err:
	octep_free_iqs(oct);
setup_iq_err:
	return -1;
}

/**
 * octep_stop() - stop the octeon network device.
 *
 * @netdev: pointer to kernel network device.
 *
 * Stop the device Tx/Rx operations, bring down the link and
 * free up all resources allocated for Tx/Rx queues and interrupts.
 */
static int octep_stop(struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);

	netdev_info(netdev, "Stopping the device ...\n");

	octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
				       false);
	octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
				    false);

	/* Stop Tx from stack */
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	oct->link_info.admin_up = 0;
	oct->link_info.oper_up = 0;

	oct->hw_ops.disable_interrupts(oct);
	octep_napi_disable(oct);
	octep_napi_delete(oct);

	octep_clean_irqs(oct);
	octep_clean_iqs(oct);

	oct->hw_ops.disable_io_queues(oct);
	oct->hw_ops.reset_io_queues(oct);
	octep_free_oqs(oct);
	octep_free_iqs(oct);

	oct->poll_non_ioq_intr = true;
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));

	netdev_info(netdev, "Device stopped !!\n");
	return 0;
}

/**
 * octep_iq_full_check() - check if a Tx queue is full.
 *
 * @iq: Octeon Tx queue data structure.
 *
 * Return: 0, if the Tx queue is not full.
 *         1, if the Tx queue is full.
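 *
 * The subqueue is stopped before the final space recheck; if a concurrent
 * completion has freed enough descriptors, the queue is restarted and 0
 * is returned.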
 */
static inline int octep_iq_full_check(struct octep_iq *iq)
{
	if (likely((IQ_INSTR_SPACE(iq)) >
		   OCTEP_WAKE_QUEUE_THRESHOLD))
		return 0;

	/* Stop the queue if unable to send */
	netif_stop_subqueue(iq->netdev, iq->q_no);

	/* Allow pending write-index updates from iq_process_completion
	 * on other CPUs to become visible, in case the queue gets free
	 * entries.
	 */
	smp_mb();

	/* check again and restart the queue, in case NAPI has just freed
	 * enough Tx ring entries.
	 */
	if (unlikely(IQ_INSTR_SPACE(iq) >
		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
		netif_start_subqueue(iq->netdev, iq->q_no);
		iq->stats->restart_cnt++;
		return 0;
	}

	return 1;
}

/**
 * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
 *
 * @skb: packet skbuff pointer.
 * @netdev: kernel network device.
 *
 * Return: NETDEV_TX_BUSY, if Tx Queue is full.
 *         NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
 */
static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);
	netdev_features_t feat = netdev->features;
	struct octep_tx_sglist_desc *sglist;
	struct octep_tx_buffer *tx_buffer;
	struct octep_tx_desc_hw *hw_desc;
	struct skb_shared_info *shinfo;
	struct octep_instr_hdr *ih;
	struct octep_iq *iq;
	skb_frag_t *frag;
	u16 nr_frags, si;
	int xmit_more;
	u16 q_no, wi;

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	q_no = skb_get_queue_mapping(skb);
	if (q_no >= oct->num_iqs) {
		netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
		q_no = q_no % oct->num_iqs;
	}

	iq = oct->iq[q_no];

	shinfo = skb_shinfo(skb);
	nr_frags = shinfo->nr_frags;

	wi = iq->host_write_index;
	hw_desc = &iq->desc_ring[wi];
	hw_desc->ih64 = 0;

	tx_buffer = iq->buff_info + wi;
	tx_buffer->skb = skb;

	ih = &hw_desc->ih;
	ih->pkind = oct->conf->fw_info.pkind;
	ih->fsz = oct->conf->fw_info.fsz;
	ih->tlen = skb->len + ih->fsz;

	if (!nr_frags) {
		tx_buffer->gather = 0;
		tx_buffer->dma = dma_map_single(iq->dev, skb->data,
						skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(iq->dev, tx_buffer->dma))
			goto dma_map_err;
		hw_desc->dptr = tx_buffer->dma;
	} else {
		/* Scatter/Gather */
		dma_addr_t dma;
		u16 len;

		sglist = tx_buffer->sglist;

		ih->gsz = nr_frags + 1;
		ih->gather = 1;
		tx_buffer->gather = 1;

		len = skb_headlen(skb);
		dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(iq->dev, dma))
			goto dma_map_err;

		memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
		sglist[0].len[3] = len;
		sglist[0].dma_ptr[0] = dma;

		si = 1; /* entry 0 is main skb, mapped above */
		frag = &shinfo->frags[0];
		while (nr_frags--) {
			len = skb_frag_size(frag);
			dma = skb_frag_dma_map(iq->dev, frag, 0,
					       len, DMA_TO_DEVICE);
			if (dma_mapping_error(iq->dev, dma))
				goto dma_map_sg_err;

			sglist[si >> 2].len[3 - (si & 3)] = len;
			sglist[si >> 2].dma_ptr[si & 3] = dma;

			frag++;
			si++;
		}
		hw_desc->dptr = tx_buffer->sglist_dma;
	}

	if (oct->conf->fw_info.tx_ol_flags) {
		if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
			hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
			hw_desc->txm.ol_flags |= OCTEP_TX_OFFLOAD_TSO;
			hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
			hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
		} else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
			hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
		}
		/* due to ESR txm will be swapped by hw */
		hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
	}

	xmit_more = netdev_xmit_more();

	__netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more);

	skb_tx_timestamp(skb);
	iq->fill_cnt++;
	wi++;
	iq->host_write_index = wi & iq->ring_size_mask;

	/* octep_iq_full_check() stops the queue and returns 1 if the queue
	 * has become full after inserting the current packet; in that case
	 * go ahead and ring the doorbell.
	 */
	if (!octep_iq_full_check(iq) && xmit_more &&
	    iq->fill_cnt < iq->fill_threshold)
		return NETDEV_TX_OK;

	/* Flush the hw descriptor before writing to doorbell */
	wmb();
	/* Ring Doorbell to notify the NIC of new packets */
	writel(iq->fill_cnt, iq->doorbell_reg);
	iq->stats->instr_posted += iq->fill_cnt;
	iq->fill_cnt = 0;
	return NETDEV_TX_OK;

dma_map_sg_err:
	if (si > 0) {
		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
				 sglist[0].len[3], DMA_TO_DEVICE);
		sglist[0].len[3] = 0;
	}
	while (si > 1) {
		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
		sglist[si >> 2].len[3 - (si & 3)] = 0;
		si--;
	}
	tx_buffer->gather = 0;
dma_map_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * octep_get_stats64() - Get Octeon network device statistics.
 *
 * @netdev: kernel network device.
 * @stats: pointer to stats structure to be filled in.
 */
static void octep_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct octep_device *oct = netdev_priv(netdev);
	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
	int q;

	tx_packets = 0;
	tx_bytes = 0;
	rx_packets = 0;
	rx_bytes = 0;
	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
		tx_packets += oct->stats_iq[q].instr_completed;
		tx_bytes += oct->stats_iq[q].bytes_sent;
		rx_packets += oct->stats_oq[q].packets;
		rx_bytes += oct->stats_oq[q].bytes;
	}
	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;
	stats->rx_bytes = rx_bytes;
}

/**
 * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
 *
 * @work: pointer to Tx queue timeout work_struct
 *
 * Stop and start the device so that it frees up all queue resources
 * and restarts the queues; that potentially clears a Tx queue timeout
 * condition.
 **/
static void octep_tx_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						tx_timeout_task);
	struct net_device *netdev = oct->netdev;

	rtnl_lock();
	if (netif_running(netdev)) {
		octep_stop(netdev);
		octep_open(netdev);
	}
	rtnl_unlock();
}

/**
 * octep_tx_timeout() - Handle Tx Queue timeout.
 *
 * @netdev: pointer to kernel network device.
 * @txqueue: Timed out Tx queue number.
 *
 * Schedule a work to handle Tx queue timeout.
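 *
 * The actual recovery (stop and reopen of the device) runs from octep_wq
 * in octep_tx_timeout_task() under the RTNL lock.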
 */
static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct octep_device *oct = netdev_priv(netdev);

	queue_work(octep_wq, &oct->tx_timeout_task);
}

static int octep_set_mac(struct net_device *netdev, void *p)
{
	struct octep_device *oct = netdev_priv(netdev);
	struct sockaddr *addr = (struct sockaddr *)p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
					  addr->sa_data, true);
	if (err)
		return err;

	memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
	eth_hw_addr_set(netdev, addr->sa_data);

	return 0;
}

static int octep_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octep_device *oct = netdev_priv(netdev);
	struct octep_iface_link_info *link_info;
	int err = 0;

	link_info = &oct->link_info;
	if (link_info->mtu == new_mtu)
		return 0;

	err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
				     true);
	if (!err) {
		oct->link_info.mtu = new_mtu;
		WRITE_ONCE(netdev->mtu, new_mtu);
	}

	return err;
}

static int octep_set_features(struct net_device *dev, netdev_features_t features)
{
	struct octep_ctrl_net_offloads offloads = { 0 };
	struct octep_device *oct = netdev_priv(dev);
	int err;

	/* We only support features received from firmware */
	if ((features & dev->hw_features) != features)
		return -EINVAL;

	if (features & NETIF_F_TSO)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;

	if (features & NETIF_F_TSO6)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;

	if (features & NETIF_F_IP_CSUM)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;

	if (features & NETIF_F_IPV6_CSUM)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;

	if (features & NETIF_F_RXCSUM)
		offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;

	err = octep_ctrl_net_set_offloads(oct,
					  OCTEP_CTRL_NET_INVALID_VFID,
					  &offloads,
					  true);
	if (!err)
		dev->features = features;

	return err;
}

static int octep_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivi)
{
	struct octep_device *oct = netdev_priv(dev);

	ivi->vf = vf;
	ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
	ivi->spoofchk = true;
	ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	ivi->trusted = false;

	return 0;
}

static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct octep_device *oct = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(mac)) {
		dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
		return -EADDRNOTAVAIL;
	}

	dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
	ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
	oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;

	err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
	if (err)
		dev_err(&oct->pdev->dev,
			"Set VF%d MAC address failed via host control Mbox\n",
			vf);

	return err;
}

static const struct net_device_ops octep_netdev_ops = {
	.ndo_open = octep_open,
	.ndo_stop = octep_stop,
	.ndo_start_xmit = octep_start_xmit,
	.ndo_get_stats64 = octep_get_stats64,
	.ndo_tx_timeout = octep_tx_timeout,
	.ndo_set_mac_address = octep_set_mac,
	.ndo_change_mtu = octep_change_mtu,
	.ndo_set_features = octep_set_features,
	.ndo_get_vf_config = octep_get_vf_config,
	.ndo_set_vf_mac = octep_set_vf_mac
};

/**
 * octep_intr_poll_task - work queue task to process non-ioq interrupts.
 *
 * @work: pointer to intr_poll_task work_struct
 *
 * Process non-ioq interrupts to handle control mailbox, pfvf mailbox.
 **/
static void octep_intr_poll_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						intr_poll_task.work);

	if (!oct->poll_non_ioq_intr) {
		dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
		return;
	}

	oct->hw_ops.poll_non_ioq_interrupts(oct);
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
}

/**
 * octep_hb_timeout_task - work queue task to check firmware heartbeat.
 *
 * @work: pointer to hb work_struct
 *
 * Check for heartbeat miss count. Uninitialize oct device if miss count
 * exceeds configured max heartbeat miss count.
 **/
static void octep_hb_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						hb_task.work);

	int miss_cnt;

	miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
	if (miss_cnt < oct->conf->fw_info.hb_miss_count) {
		queue_delayed_work(octep_wq, &oct->hb_task,
				   msecs_to_jiffies(oct->conf->fw_info.hb_interval));
		return;
	}

	dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
		miss_cnt);
	rtnl_lock();
	if (netif_running(oct->netdev))
		dev_close(oct->netdev);
	rtnl_unlock();
}

/**
 * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
 *
 * @work: pointer to ctrl mbox work_struct
 *
 * Poll ctrl mbox message queue and handle control messages from firmware.
 **/
static void octep_ctrl_mbox_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						ctrl_mbox_task);

	octep_ctrl_net_recv_fw_messages(oct);
}

static const char *octep_devid_to_str(struct octep_device *oct)
{
	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN98_PF:
		return "CN98XX";
	case OCTEP_PCI_DEVICE_ID_CN93_PF:
		return "CN93XX";
	case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
		return "CNF95N";
	case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
		return "CN10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
		return "CNF10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
		return "CNF10KB";
	case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
		return "CN10KB";
	default:
		return "Unsupported";
	}
}

/**
 * octep_device_setup() - Setup Octeon Device.
 *
 * @oct: Octeon device private data structure.
 *
 * Setup Octeon device hardware operations, configuration, etc ...
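 *
 * Return: 0 on success, negative error code or -1 on failure.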
 */
int octep_device_setup(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;
	int i, ret;

	/* allocate memory for oct->conf */
	oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
	if (!oct->conf)
		return -ENOMEM;

	/* Map BAR regions */
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
		oct->mmio[i].hw_addr =
			ioremap(pci_resource_start(oct->pdev, i * 2),
				pci_resource_len(oct->pdev, i * 2));
		if (!oct->mmio[i].hw_addr)
			goto unmap_prev;

		oct->mmio[i].mapped = 1;
	}

	oct->chip_id = pdev->device;
	oct->rev_id = pdev->revision;
	dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);

	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN98_PF:
	case OCTEP_PCI_DEVICE_ID_CN93_PF:
	case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
		dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
			 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
			 OCTEP_MINOR_REV(oct));
		octep_device_setup_cn93_pf(oct);
		break;
	case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
	case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
	case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
	case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
		dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
			 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
			 OCTEP_MINOR_REV(oct));
		octep_device_setup_cnxk_pf(oct);
		break;
	default:
		dev_err(&pdev->dev,
			"%s: unsupported device\n", __func__);
		goto unsupported_dev;
	}

	ret = octep_ctrl_net_init(oct);
	if (ret)
		return ret;

	INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
	INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
	INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
	oct->poll_non_ioq_intr = true;
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));

	atomic_set(&oct->hb_miss_cnt, 0);
	INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);

	return 0;

unsupported_dev:
	i = OCTEP_MMIO_REGIONS;
unmap_prev:
	while (i--)
		iounmap(oct->mmio[i].hw_addr);

	kfree(oct->conf);
	return -1;
}

/**
 * octep_device_cleanup() - Cleanup Octeon Device.
 *
 * @oct: Octeon device private data structure.
 *
 * Cleanup Octeon device allocated resources.
 */
static void octep_device_cleanup(struct octep_device *oct)
{
	int i;

	oct->poll_non_ioq_intr = false;
	cancel_delayed_work_sync(&oct->intr_poll_task);
	cancel_work_sync(&oct->ctrl_mbox_task);

	dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");

	for (i = 0; i < OCTEP_MAX_VF; i++) {
		vfree(oct->mbox[i]);
		oct->mbox[i] = NULL;
	}

	octep_delete_pfvf_mbox(oct);
	octep_ctrl_net_uninit(oct);
	cancel_delayed_work_sync(&oct->hb_task);

	oct->hw_ops.soft_reset(oct);
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
		if (oct->mmio[i].mapped)
			iounmap(oct->mmio[i].hw_addr);
	}

	kfree(oct->conf);
	oct->conf = NULL;
}

static bool get_fw_ready_status(struct pci_dev *pdev)
{
	u32 pos = 0;
	u16 vsec_id;
	u8 status;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(pdev, pos + 4, &vsec_id);
#define FW_STATUS_VSEC_ID  0xA3
		if (vsec_id != FW_STATUS_VSEC_ID)
			continue;

		pci_read_config_byte(pdev, (pos + 8), &status);
		dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
#define FW_STATUS_READY  1ULL
		return status == FW_STATUS_READY;
	}
	return false;
}

/**
 * octep_probe() - Octeon PCI device probe handler.
 *
 * @pdev: PCI device structure.
 * @ent: entry in Octeon PCI device ID table.
 *
 * Initializes and enables the Octeon PCI device for network operations.
 * Initializes Octeon private data structure and registers a network device.
 */
static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct octep_device *octep_dev = NULL;
	struct net_device *netdev;
	int max_rx_pktlen;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
		goto err_dma_mask;
	}

	err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	if (!get_fw_ready_status(pdev)) {
		dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n");
		err = -EPROBE_DEFER;
		goto err_alloc_netdev;
	}

	netdev = alloc_etherdev_mq(sizeof(struct octep_device),
				   OCTEP_MAX_QUEUES);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate netdev\n");
		err = -ENOMEM;
		goto err_alloc_netdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	octep_dev = netdev_priv(netdev);
	octep_dev->netdev = netdev;
	octep_dev->pdev = pdev;
	octep_dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, octep_dev);

	err = octep_device_setup(octep_dev);
	if (err) {
		dev_err(&pdev->dev, "Device setup failed\n");
		goto err_octep_config;
	}

	err = octep_setup_pfvf_mbox(octep_dev);
	if (err) {
		dev_err(&pdev->dev, "PF-VF mailbox setup failed\n");
		goto register_dev_err;
	}

	err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
				      &octep_dev->conf->fw_info);
	if (err) {
		dev_err(&pdev->dev, "Failed to get firmware info\n");
		goto register_dev_err;
	}
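	/* fw_info retrieved above provides the heartbeat parameters and the
	 * Tx/Rx offload capabilities used for the rest of the setup.
	 */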
	dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
		 octep_dev->conf->fw_info.hb_interval,
		 octep_dev->conf->fw_info.hb_miss_count);
	queue_delayed_work(octep_wq, &octep_dev->hb_task,
			   msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));

	netdev->netdev_ops = &octep_netdev_ops;
	octep_set_ethtool_ops(netdev);
	netif_carrier_off(netdev);

	netdev->hw_features = NETIF_F_SG;
	if (OCTEP_TX_IP_CSUM(octep_dev->conf->fw_info.tx_ol_flags))
		netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	if (OCTEP_RX_IP_CSUM(octep_dev->conf->fw_info.rx_ol_flags))
		netdev->hw_features |= NETIF_F_RXCSUM;

	max_rx_pktlen = octep_ctrl_net_get_mtu(octep_dev, OCTEP_CTRL_NET_INVALID_VFID);
	if (max_rx_pktlen < 0) {
		dev_err(&octep_dev->pdev->dev,
			"Failed to get max receive packet size; err = %d\n", max_rx_pktlen);
		err = max_rx_pktlen;
		goto register_dev_err;
	}
	netdev->min_mtu = OCTEP_MIN_MTU;
	netdev->max_mtu = max_rx_pktlen - (ETH_HLEN + ETH_FCS_LEN);
	netdev->mtu = OCTEP_DEFAULT_MTU;

	if (OCTEP_TX_TSO(octep_dev->conf->fw_info.tx_ol_flags)) {
		netdev->hw_features |= NETIF_F_TSO;
		netif_set_tso_max_size(netdev, netdev->max_mtu);
	}

	netdev->features |= netdev->hw_features;
	err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
					  octep_dev->mac_addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to get mac address\n");
		goto register_dev_err;
	}
	eth_hw_addr_set(netdev, octep_dev->mac_addr);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto register_dev_err;
	}
	dev_info(&pdev->dev, "Device probe successful\n");
	return 0;

register_dev_err:
	octep_device_cleanup(octep_dev);
err_octep_config:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

static int octep_sriov_disable(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;

	if (pci_vfs_assigned(oct->pdev)) {
		dev_warn(&pdev->dev, "Can't disable SRIOV while VFs are assigned\n");
		return -EPERM;
	}

	pci_disable_sriov(pdev);
	CFG_GET_ACTIVE_VFS(oct->conf) = 0;

	return 0;
}

/**
 * octep_remove() - Remove Octeon PCI device from driver control.
 *
 * @pdev: PCI device structure of the Octeon device.
 *
 * Cleanup all resources allocated for the Octeon device.
 * Unregister the network device and disable the PCI device.
 */
static void octep_remove(struct pci_dev *pdev)
{
	struct octep_device *oct = pci_get_drvdata(pdev);
	struct net_device *netdev;

	if (!oct)
		return;

	netdev = oct->netdev;
	octep_sriov_disable(oct);
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	cancel_work_sync(&oct->tx_timeout_task);
	octep_device_cleanup(oct);
	pci_release_mem_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static int octep_sriov_enable(struct octep_device *oct, int num_vfs)
{
	struct pci_dev *pdev = oct->pdev;
	int err;

	CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs;
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err);
		CFG_GET_ACTIVE_VFS(oct->conf) = 0;
		return err;
	}

	return num_vfs;
}

static int octep_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct octep_device *oct = pci_get_drvdata(pdev);
	int max_nvfs;

	if (num_vfs == 0)
		return octep_sriov_disable(oct);

	max_nvfs = CFG_GET_MAX_VFS(oct->conf);

	if (num_vfs > max_nvfs) {
		dev_err(&pdev->dev, "Invalid VF count; max supported VFs = %d\n",
			max_nvfs);
		return -EINVAL;
	}

	return octep_sriov_enable(oct, num_vfs);
}

static struct pci_driver octep_driver = {
	.name = OCTEP_DRV_NAME,
	.id_table = octep_pci_id_tbl,
	.probe = octep_probe,
	.remove = octep_remove,
	.sriov_configure = octep_sriov_configure,
};

/**
 * octep_init_module() - Module initialization.
 *
 * Create common resources for the driver and register the PCI driver.
 */
static int __init octep_init_module(void)
{
	int ret;

	pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);

	/* work queue for all deferred tasks */
	octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
	if (!octep_wq) {
		pr_err("%s: Failed to create common workqueue\n",
		       OCTEP_DRV_NAME);
		return -ENOMEM;
	}

	ret = pci_register_driver(&octep_driver);
	if (ret < 0) {
		pr_err("%s: Failed to register PCI driver; err=%d\n",
		       OCTEP_DRV_NAME, ret);
		destroy_workqueue(octep_wq);
		return ret;
	}

	pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);

	return ret;
}

/**
 * octep_exit_module() - Module exit routine.
 *
 * Unregister the driver with the PCI subsystem and clean up common resources.
 */
static void __exit octep_exit_module(void)
{
	pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);

	pci_unregister_driver(&octep_driver);
	destroy_workqueue(octep_wq);

	pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
}

module_init(octep_init_module);
module_exit(octep_exit_module);