// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/filter.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/netdev_queues.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"
#include "efx_devlink.h"

#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
                 "Use separate channels for TX and RX");

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                             NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
                             NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
                             NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
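/* Illustrative usage note (interface name and values are examples only):
 * the parameters above can be given at load time, e.g.
 *   modprobe sfc interrupt_mode=1 rss_cpus=4
 * and the initial moderation values (rx_irq_mod_usec/tx_irq_mod_usec)
 * can be changed at runtime with ethtool, e.g.
 *   ethtool -C eth0 adaptive-rx on rx-usecs 60 tx-usecs 150
 */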
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
                        u32 flags);

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

static void efx_fini_port(struct efx_nic *efx);

static int efx_probe_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "create port\n");

        if (phy_flash_cfg)
                efx->phy_mode = PHY_MODE_SPECIAL;

        /* Connect up MAC/PHY operations table */
        rc = efx->type->probe_port(efx);
        if (rc)
                return rc;

        /* Initialise MAC address to permanent address */
        eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);

        return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, drv, efx->net_dev, "init port\n");

        mutex_lock(&efx->mac_lock);

        efx->port_initialized = true;

        /* Ensure the PHY advertises the correct flow control settings */
        rc = efx_mcdi_port_reconfigure(efx);
        if (rc && rc != -EPERM)
                goto fail;

        mutex_unlock(&efx->mac_lock);
        return 0;

fail:
        mutex_unlock(&efx->mac_lock);
        return rc;
}

static void efx_fini_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

        if (!efx->port_initialized)
                return;

        efx->port_initialized = false;

        efx->link_state.up = false;
        efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

        efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
        return left->type == right->type &&
                left->vpd_sn && right->vpd_sn &&
                !strcmp(left->vpd_sn, right->vpd_sn);
}

static void efx_associate(struct efx_nic *efx)
{
        struct efx_nic *other, *next;

        if (efx->primary == efx) {
                /* Adding primary function; look for secondaries */

                netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
                list_add_tail(&efx->node, &efx_primary_list);

                list_for_each_entry_safe(other, next, &efx_unassociated_list,
                                         node) {
                        if (efx_same_controller(efx, other)) {
                                list_del(&other->node);
                                netif_dbg(other, probe, other->net_dev,
                                          "moving to secondary list of %s %s\n",
                                          pci_name(efx->pci_dev),
                                          efx->net_dev->name);
                                list_add_tail(&other->node,
                                              &efx->secondary_list);
                                other->primary = efx;
                        }
                }
        } else {
                /* Adding secondary function; look for primary */

                list_for_each_entry(other, &efx_primary_list, node) {
                        if (efx_same_controller(efx, other)) {
                                netif_dbg(efx, probe, efx->net_dev,
                                          "adding to secondary list of %s %s\n",
                                          pci_name(other->pci_dev),
                                          other->net_dev->name);
                                list_add_tail(&efx->node,
                                              &other->secondary_list);
                                efx->primary = other;
                                return;
                        }
                }

                netif_dbg(efx, probe, efx->net_dev,
                          "adding to unassociated list\n");
                list_add_tail(&efx->node, &efx_unassociated_list);
        }
}

static void efx_dissociate(struct efx_nic *efx)
{
        struct efx_nic *other, *next;

        list_del(&efx->node);
        efx->primary = NULL;

        list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
                list_del(&other->node);
                netif_dbg(other, probe, other->net_dev,
                          "moving to unassociated list\n");
                list_add_tail(&other->node, &efx_unassociated_list);
        }
}
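/* Illustrative: efx_same_controller() keys on the VPD serial number, so
 * e.g. the two PCI functions of a dual-port adapter (which report one
 * board serial) end up as one primary with the other on its
 * secondary_list, whichever order they happen to probe in.
 */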
static int efx_probe_nic(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

        /* Carry out hardware-type specific initialisation */
        rc = efx->type->probe(efx);
        if (rc)
                return rc;

        do {
                if (!efx->max_channels || !efx->max_tx_channels) {
                        netif_err(efx, drv, efx->net_dev,
                                  "Insufficient resources to allocate any channels\n");
                        rc = -ENOSPC;
                        goto fail1;
                }

                /* Determine the number of channels and queues by trying
                 * to hook in MSI-X interrupts.
                 */
                rc = efx_probe_interrupts(efx);
                if (rc)
                        goto fail1;

                rc = efx_set_channels(efx);
                if (rc)
                        goto fail1;

                /* dimension_resources can fail with EAGAIN */
                rc = efx->type->dimension_resources(efx);
                if (rc != 0 && rc != -EAGAIN)
                        goto fail2;

                if (rc == -EAGAIN)
                        /* try again with new max_channels */
                        efx_remove_interrupts(efx);

        } while (rc == -EAGAIN);

        if (efx->n_channels > 1)
                netdev_rss_key_fill(efx->rss_context.rx_hash_key,
                                    sizeof(efx->rss_context.rx_hash_key));
        efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);

        /* Initialise the interrupt moderation settings */
        efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
                                true);

        return 0;

fail2:
        efx_remove_interrupts(efx);
fail1:
        efx->type->remove(efx);
        return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

        efx_remove_interrupts(efx);
        efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
        int rc;

        rc = efx_probe_nic(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
                goto fail1;
        }

        rc = efx_probe_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create port\n");
                goto fail2;
        }

        BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
        if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
                rc = -EINVAL;
                goto fail3;
        }

#ifdef CONFIG_SFC_SRIOV
        rc = efx->type->vswitching_probe(efx);
        if (rc) /* not fatal; the PF will still work fine */
                netif_warn(efx, probe, efx->net_dev,
                           "failed to setup vswitching rc=%d; VFs may not function\n",
                           rc);
#endif

        rc = efx_probe_filters(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to create filter tables\n");
                goto fail4;
        }

        rc = efx_probe_channels(efx);
        if (rc)
                goto fail5;

        efx->state = STATE_NET_DOWN;

        return 0;

fail5:
        efx_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
#endif
fail3:
        efx_remove_port(efx);
fail2:
        efx_remove_nic(efx);
fail1:
        return rc;
}

static void efx_remove_all(struct efx_nic *efx)
{
        rtnl_lock();
        efx_xdp_setup_prog(efx, NULL);
        rtnl_unlock();

        efx_remove_channels(efx);
        efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
#endif
        efx_remove_port(efx);
        efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
        if (usecs == 0)
                return 0;
        if (usecs * 1000 < efx->timer_quantum_ns)
                return 1; /* never round down to 0 */
        return usecs * 1000 / efx->timer_quantum_ns;
}
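/* Worked example (the quantum value is illustrative, it is hardware
 * specific): with timer_quantum_ns = 6144, efx_usecs_to_ticks(efx, 60)
 * = 60000 / 6144 = 9 ticks, while any non-zero request below one
 * quantum rounds up to 1 tick so moderation is never silently disabled.
 */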
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx)
{
        struct efx_channel *channel;
        unsigned int timer_max_us;

        EFX_ASSERT_RESET_SERIALISED(efx);

        timer_max_us = efx->timer_max_ns / 1000;

        if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
                return -EINVAL;

        if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
            !rx_may_override_tx) {
                netif_err(efx, drv, efx->net_dev,
                          "Channels are shared. RX and TX IRQ moderation must be equal\n");
                return -EINVAL;
        }

        efx->irq_rx_adaptive = rx_adaptive;
        efx->irq_rx_moderation_us = rx_usecs;
        efx_for_each_channel(channel, efx) {
                if (efx_channel_has_rx_queue(channel))
                        channel->irq_moderation_us = rx_usecs;
                else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation_us = tx_usecs;
                else if (efx_channel_is_xdp_tx(channel))
                        channel->irq_moderation_us = tx_usecs;
        }

        return 0;
}
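/* Illustrative: with shared RX/TX channels (efx_separate_tx_channels=0),
 * a caller passing rx_may_override_tx=true lets the RX value win, since
 * the loop above programmes a channel with an RX queue using rx_usecs
 * even when that channel also carries TX queues.
 */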
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive)
{
        *rx_adaptive = efx->irq_rx_adaptive;
        *rx_usecs = efx->irq_rx_moderation_us;

        /* If channels are shared between RX and TX, so is IRQ
         * moderation. Otherwise, IRQ moderation is the same for all
         * TX channels and is not adaptive.
         */
        if (efx->tx_channel_offset == 0) {
                *tx_usecs = *rx_usecs;
        } else {
                struct efx_channel *tx_channel;

                tx_channel = efx->channel[efx->tx_channel_offset];
                *tx_usecs = tx_channel->irq_moderation_us;
        }
}

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        int rc;

        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());

        rc = efx_check_disabled(efx);
        if (rc)
                return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
                return -EIO;

        /* Notify the kernel of the link state polled during driver load,
         * before the monitor starts running
         */
        efx_link_status_changed(efx);

        efx_start_all(efx);
        if (efx->state == STATE_DISABLED || efx->reset_pending)
                netif_device_detach(efx->net_dev);
        else
                efx->state = STATE_NET_UP;

        return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());

        /* Stop the device and flush all the channels */
        efx_stop_all(efx);

        return 0;
}
static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        if (efx->type->vlan_rx_add_vid)
                return efx->type->vlan_rx_add_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        if (efx->type->vlan_rx_kill_vid)
                return efx->type->vlan_rx_kill_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_hwtstamp_set(struct net_device *net_dev,
                            struct kernel_hwtstamp_config *config,
                            struct netlink_ext_ack *extack)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        return efx_ptp_set_ts_config(efx, config, extack);
}

static int efx_hwtstamp_get(struct net_device *net_dev,
                            struct kernel_hwtstamp_config *config)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);

        return efx_ptp_get_ts_config(efx, config);
}

static const struct net_device_ops efx_netdev_ops = {
        .ndo_open = efx_net_open,
        .ndo_stop = efx_net_stop,
        .ndo_get_stats64 = efx_net_stats,
        .ndo_tx_timeout = efx_watchdog,
        .ndo_start_xmit = efx_hard_start_xmit,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = efx_change_mtu,
        .ndo_set_mac_address = efx_set_mac_address,
        .ndo_set_rx_mode = efx_set_rx_mode,
        .ndo_set_features = efx_set_features,
        .ndo_features_check = efx_features_check,
        .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
        .ndo_hwtstamp_set = efx_hwtstamp_set,
        .ndo_hwtstamp_get = efx_hwtstamp_get,
#ifdef CONFIG_SFC_SRIOV
        .ndo_set_vf_mac = efx_sriov_set_vf_mac,
        .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
        .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
        .ndo_get_vf_config = efx_sriov_get_vf_config,
        .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
#endif
        .ndo_get_phys_port_id = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer = efx_filter_rfs,
#endif
        .ndo_xdp_xmit = efx_xdp_xmit,
        .ndo_bpf = efx_xdp
};

static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
                                   struct netdev_queue_stats_rx *stats)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;

        channel = efx_get_channel(efx, idx);
        rx_queue = efx_channel_get_rx_queue(channel);
        /* Count only packets since last time datapath was started */
        stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
        stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
        stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
                          channel->old_n_rx_hw_drops;
        stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
                                  channel->old_n_rx_hw_drop_overruns;
}

static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
                                   struct netdev_queue_stats_tx *stats)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        struct efx_channel *channel;

        channel = efx_get_tx_channel(efx, idx);
        stats->packets = 0;
        stats->bytes = 0;
        stats->hw_gso_packets = 0;
        stats->hw_gso_wire_packets = 0;
        efx_for_each_channel_tx_queue(tx_queue, channel) {
                stats->packets += tx_queue->complete_packets -
                                  tx_queue->old_complete_packets;
                stats->bytes += tx_queue->complete_bytes -
                                tx_queue->old_complete_bytes;
                /* Note that, unlike stats->packets and stats->bytes,
                 * these count TXes enqueued, rather than completed,
                 * which may not be what users expect.
                 */
                stats->hw_gso_packets += tx_queue->tso_bursts -
                                         tx_queue->old_tso_bursts;
                stats->hw_gso_wire_packets += tx_queue->tso_packets -
                                              tx_queue->old_tso_packets;
        }
}

static void efx_get_base_stats(struct net_device *net_dev,
                               struct netdev_queue_stats_rx *rx,
                               struct netdev_queue_stats_tx *tx)
{
        struct efx_nic *efx = efx_netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;

        rx->packets = 0;
        rx->bytes = 0;
        rx->hw_drops = 0;
        rx->hw_drop_overruns = 0;
        tx->packets = 0;
        tx->bytes = 0;
        tx->hw_gso_packets = 0;
        tx->hw_gso_wire_packets = 0;

        /* Count all packets on non-core queues, and packets before last
         * datapath start on core queues.
         */
        efx_for_each_channel(channel, efx) {
                rx_queue = efx_channel_get_rx_queue(channel);
                if (channel->channel >= net_dev->real_num_rx_queues) {
                        rx->packets += rx_queue->rx_packets;
                        rx->bytes += rx_queue->rx_bytes;
                        rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
                        rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
                } else {
                        rx->packets += rx_queue->old_rx_packets;
                        rx->bytes += rx_queue->old_rx_bytes;
                        rx->hw_drops += channel->old_n_rx_hw_drops;
                        rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
                }
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        if (channel->channel < efx->tx_channel_offset ||
                            channel->channel >= efx->tx_channel_offset +
                                                net_dev->real_num_tx_queues) {
                                tx->packets += tx_queue->complete_packets;
                                tx->bytes += tx_queue->complete_bytes;
                                tx->hw_gso_packets += tx_queue->tso_bursts;
                                tx->hw_gso_wire_packets += tx_queue->tso_packets;
                        } else {
                                tx->packets += tx_queue->old_complete_packets;
                                tx->bytes += tx_queue->old_complete_bytes;
                                tx->hw_gso_packets += tx_queue->old_tso_bursts;
                                tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
                        }
                        /* Include XDP TX in device-wide stats */
                        tx->packets += tx_queue->complete_xdp_packets;
                        tx->bytes += tx_queue->complete_xdp_bytes;
                }
        }
}

static const struct netdev_stat_ops efx_stat_ops = {
        .get_queue_stats_rx = efx_get_queue_stats_rx,
        .get_queue_stats_tx = efx_get_queue_stats_tx,
        .get_base_stats = efx_get_base_stats,
};
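/* Illustrative: these per-queue counters are exported over the netdev
 * netlink family; on recent kernels they can be dumped with e.g.
 *   tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *       --dump qstats-get
 * (tool path and invocation are illustrative of the qstats interface).
 */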
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog;

        if (efx->xdp_rxq_info_failed) {
                netif_err(efx, drv, efx->net_dev,
                          "Unable to bind XDP program due to previous failure of rxq_info\n");
                return -EINVAL;
        }

        if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
                netif_err(efx, drv, efx->net_dev,
                          "Unable to configure XDP with MTU of %d (max: %d)\n",
                          efx->net_dev->mtu, efx_xdp_max_mtu(efx));
                return -EINVAL;
        }

        old_prog = rtnl_dereference(efx->xdp_prog);
        rcu_assign_pointer(efx->xdp_prog, prog);
        /* Release the reference that was originally passed by the caller. */
        if (old_prog)
                bpf_prog_put(old_prog);

        return 0;
}

/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct efx_nic *efx = efx_netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return efx_xdp_setup_prog(efx, xdp->prog);
        default:
                return -EINVAL;
        }
}

static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
                        u32 flags)
{
        struct efx_nic *efx = efx_netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
}
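/* Illustrative: an XDP program reaches efx_xdp_setup_prog() via the usual
 * netlink path, e.g. "ip link set dev <ifname> xdp obj prog.o sec xdp"
 * (names illustrative); the attach fails with -EINVAL if the current MTU
 * exceeds efx_xdp_max_mtu(), so shrink the MTU first if necessary.
 */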
static void efx_update_name(struct efx_nic *efx)
{
        strcpy(efx->name, efx->net_dev->name);
        efx_mtd_rename(efx);
        efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

        if ((net_dev->netdev_ops == &efx_netdev_ops) &&
            event == NETDEV_CHANGENAME)
                efx_update_name(efx_netdev_priv(net_dev));

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};

static ssize_t phy_type_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR_RO(phy_type);
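/* Illustrative: the attribute is created on the PCI device below, so it
 * shows up as e.g. /sys/bus/pci/devices/0000:01:00.0/phy_type (the BDF is
 * an example) and reads back the numeric PHY type.
 */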
static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        struct efx_channel *channel;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->netdev_ops = &efx_netdev_ops;
        net_dev->stat_ops = &efx_stat_ops;
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                net_dev->priv_flags |= IFF_UNICAST_FLT;
        net_dev->ethtool_ops = &efx_ethtool_ops;
        netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
        net_dev->min_mtu = EFX_MIN_MTU;
        net_dev->max_mtu = EFX_MAX_MTU;

        rtnl_lock();

        /* Enable resets to be scheduled and check whether any were
         * already requested. If so, the NIC is probably hosed so we
         * abort.
         */
        if (efx->reset_pending) {
                pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
                rc = -EIO;
                goto fail_locked;
        }

        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(net_dev);

        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;

        efx_for_each_channel(channel, efx) {
                struct efx_tx_queue *tx_queue;

                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_init_tx_queue_core_txq(tx_queue);
        }

        efx_associate(efx);

        efx->state = STATE_NET_DOWN;

        rtnl_unlock();

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
        if (rc) {
                netif_err(efx, drv, efx->net_dev,
                          "failed to init net dev attributes\n");
                goto fail_registered;
        }

        efx_init_mcdi_logging(efx);

        return 0;

fail_registered:
        rtnl_lock();
        efx_dissociate(efx);
        unregister_netdevice(net_dev);
fail_locked:
        efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
        if (!efx->net_dev)
                return;

        if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
                return;

        if (efx_dev_registered(efx)) {
                strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                efx_fini_mcdi_logging(efx);
                device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
                unregister_netdev(efx->net_dev);
        }
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
         .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
         .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
         .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
         .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
         .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
         .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
         .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03),  /* X4 PF (FF/LL) */
         .driver_data = (unsigned long)&efx_x4_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03),  /* X4 PF (FF only) */
         .driver_data = (unsigned long)&efx_x4_nic_type},
        {0}                     /* end of list */
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
        u64 n_rx_nodesc_trunc = 0;
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        /* Flush reset_work. It can no longer be scheduled since we
         * are not READY.
         */
        WARN_ON(efx_net_active(efx->state));
        efx_flush_reset_workqueue(efx);

        efx_disable_interrupts(efx);
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
        efx_fini_port(efx);
        efx->type->fini(efx);
        efx_fini_napi(efx);
        efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_probe_data *probe_data;
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx_dissociate(efx);
        dev_close(efx->net_dev);
        efx_disable_interrupts(efx);
        efx->state = STATE_UNINIT;
        rtnl_unlock();

        if (efx->type->sriov_fini)
                efx->type->sriov_fini(efx);

        efx_fini_devlink_lock(efx);
        efx_unregister_netdev(efx);

        efx_mtd_remove(efx);

        efx_pci_remove_main(efx);

        efx_fini_io(efx);
        pci_dbg(efx->pci_dev, "shutdown successful\n");

        efx_fini_devlink_and_unlock(efx);
        efx_fini_struct(efx);
        free_netdev(efx->net_dev);
        probe_data = container_of(efx, struct efx_probe_data, efx);
        kfree(probe_data);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.
 */
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
        struct pci_dev *dev = efx->pci_dev;
        unsigned int vpd_size, kw_len;
        u8 *vpd_data;
        int start;

        vpd_data = pci_vpd_alloc(dev, &vpd_size);
        if (IS_ERR(vpd_data)) {
                pci_warn(dev, "Unable to read VPD\n");
                return;
        }

        start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
                                             PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
        if (start < 0)
                pci_err(dev, "Part number not found or incomplete\n");
        else
                pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);

        start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
                                             PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
        if (start < 0)
                pci_err(dev, "Serial number not found or incomplete\n");
        else
                efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);

        kfree(vpd_data);
}
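/* Illustrative: the same VPD fields are visible from userspace, e.g. as
 * the [PN]/[SN] entries decoded by "lspci -vv" or via the sysfs "vpd"
 * attribute of the PCI device (both typically need root).
 */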
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        efx_init_napi(efx);

        down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
        up_write(&efx->filter_sem);
        if (rc) {
                pci_err(efx->pci_dev, "failed to initialise NIC\n");
                goto fail3;
        }

        rc = efx_init_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise port\n");
                goto fail4;
        }

        rc = efx_nic_init_interrupt(efx);
        if (rc)
                goto fail5;

        efx_set_interrupt_affinity(efx);
        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail6;

        return 0;

fail6:
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
fail5:
        efx_fini_port(efx);
fail4:
        efx->type->fini(efx);
fail3:
        efx_fini_napi(efx);
        efx_remove_all(efx);
fail1:
        return rc;
}

static int efx_pci_probe_post_io(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc = efx_pci_probe_main(efx);

        if (rc)
                return rc;

        if (efx->type->sriov_init) {
                rc = efx->type->sriov_init(efx);
                if (rc)
                        pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
                                rc);
        }

        /* Determine netdevice features */
        net_dev->features |= efx->type->offload_features;

        /* Add TSO features */
        if (efx->type->tso_versions && efx->type->tso_versions(efx))
                net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;

        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
                                   NETIF_F_RXCSUM);

        /* Determine user configurable features */
        net_dev->hw_features |= net_dev->features & ~efx->fixed_features;

        /* Disable receiving frames with bad FCS, by default. */
        net_dev->features &= ~NETIF_F_RXALL;

        /* Disable VLAN filtering by default. It may be enforced if
         * the feature is fixed (i.e. VLAN filters are required to
         * receive VLAN tagged packets due to vPort restrictions).
         */
        net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
        net_dev->features |= efx->fixed_features;

        net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
                                NETDEV_XDP_ACT_REDIRECT |
                                NETDEV_XDP_ACT_NDO_XMIT;

        /* devlink creation, registration and lock */
        rc = efx_probe_devlink_and_lock(efx);
        if (rc)
                pci_err(efx->pci_dev, "devlink registration failed");

        rc = efx_register_netdev(efx);
        efx_probe_devlink_unlock(efx);
        if (!rc)
                return 0;

        efx_pci_remove_main(efx);
        return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
                         const struct pci_device_id *entry)
{
        struct efx_probe_data *probe_data, **probe_ptr;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;

        /* Allocate probe data and struct efx_nic */
        probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
        if (!probe_data)
                return -ENOMEM;
        probe_data->pci_dev = pci_dev;
        efx = &probe_data->efx;

        /* Allocate and initialise a struct net_device */
        net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
        if (!net_dev) {
                rc = -ENOMEM;
                goto fail0;
        }
        probe_ptr = netdev_priv(net_dev);
        *probe_ptr = probe_data;
        efx->net_dev = net_dev;
        efx->type = (const struct efx_nic_type *)entry->driver_data;
        efx->fixed_features |= NETIF_F_HIGHDMA;

        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
        rc = efx_init_struct(efx, pci_dev);
        if (rc)
                goto fail1;

        pci_info(pci_dev, "Solarflare NIC detected\n");

        if (!efx->type->is_vf)
                efx_probe_vpd_strings(efx);

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
                         efx->type->mem_map_size(efx));
        if (rc)
                goto fail2;

        rc = efx_pci_probe_post_io(efx);
        if (rc) {
                /* On failure, retry once immediately.
                 * If we aborted probe due to a scheduled reset, dismiss it.
                 */
                efx->reset_pending = 0;
                rc = efx_pci_probe_post_io(efx);
                if (rc) {
                        /* On another failure, retry once more
                         * after a 50-305ms delay.
                         */
                        unsigned char r;

                        get_random_bytes(&r, 1);
                        msleep((unsigned int)r + 50);
                        efx->reset_pending = 0;
                        rc = efx_pci_probe_post_io(efx);
                }
        }
        if (rc)
                goto fail3;

        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

        /* Try to create MTDs, but allow this to fail */
        rtnl_lock();
        rc = efx_mtd_probe(efx);
        rtnl_unlock();
        if (rc && rc != -EPERM)
                netif_warn(efx, probe, efx->net_dev,
                           "failed to create MTDs (%d)\n", rc);

        if (efx->type->udp_tnl_push_ports)
                efx->type->udp_tnl_push_ports(efx);

        return 0;

fail3:
        efx_fini_io(efx);
fail2:
        efx_fini_struct(efx);
fail1:
        WARN_ON(rc > 0);
        netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
fail0:
        kfree(probe_data);
        return rc;
}

/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
        int rc;
        struct efx_nic *efx = pci_get_drvdata(dev);

        if (efx->type->sriov_configure) {
                rc = efx->type->sriov_configure(efx, num_vfs);
                if (rc)
                        return rc;
                else
                        return num_vfs;
        } else
                return -EOPNOTSUPP;
}
#endif
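/* Illustrative: efx_pci_sriov_configure() is reached through the standard
 * PCI sysfs knob, e.g. "echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs"
 * (BDF illustrative); writing 0 disables the VFs again.
 */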
static int efx_pm_freeze(struct device *dev)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        rtnl_lock();

        if (efx_net_active(efx->state)) {
                efx_device_detach_sync(efx);

                efx_stop_all(efx);
                efx_disable_interrupts(efx);

                efx->state = efx_freeze(efx->state);
        }

        rtnl_unlock();

        return 0;
}

static void efx_pci_shutdown(struct pci_dev *pci_dev)
{
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        if (!efx)
                return;

        efx_pm_freeze(&pci_dev->dev);
        pci_disable_device(pci_dev);
}

static int efx_pm_thaw(struct device *dev)
{
        int rc;
        struct efx_nic *efx = dev_get_drvdata(dev);

        rtnl_lock();

        if (efx_frozen(efx->state)) {
                rc = efx_enable_interrupts(efx);
                if (rc)
                        goto fail;

                mutex_lock(&efx->mac_lock);
                efx_mcdi_port_reconfigure(efx);
                mutex_unlock(&efx->mac_lock);

                efx_start_all(efx);

                efx_device_attach_if_not_resetting(efx);

                efx->state = efx_thaw(efx->state);

                efx->type->resume_wol(efx);
        }

        rtnl_unlock();

        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        efx_queue_reset_work(efx);

        return 0;

fail:
        rtnl_unlock();

        return rc;
}

static int efx_pm_poweroff(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        efx->type->fini(efx);

        efx->reset_pending = 0;

        pci_save_state(pci_dev);
        return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);
        int rc;

        rc = pci_set_power_state(pci_dev, PCI_D0);
        if (rc)
                return rc;
        pci_restore_state(pci_dev);
        rc = pci_enable_device(pci_dev);
        if (rc)
                return rc;
        pci_set_master(efx->pci_dev);
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
        down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
        up_write(&efx->filter_sem);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
        return rc;
}

static int efx_pm_suspend(struct device *dev)
{
        int rc;

        efx_pm_freeze(dev);
        rc = efx_pm_poweroff(dev);
        if (rc)
                efx_pm_resume(dev);
        return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
        .suspend = efx_pm_suspend,
        .resume = efx_pm_resume,
        .freeze = efx_pm_freeze,
        .thaw = efx_pm_thaw,
        .poweroff = efx_pm_poweroff,
        .restore = efx_pm_resume,
};
static struct pci_driver efx_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = efx_pci_table,
        .probe = efx_pci_probe,
        .remove = efx_pci_remove,
        .driver.pm = &efx_pm_ops,
        .shutdown = efx_pci_shutdown,
        .err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_pci_sriov_configure,
#endif
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

        rc = efx_create_reset_workqueue();
        if (rc)
                goto err_reset;

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        rc = pci_register_driver(&ef100_pci_driver);
        if (rc < 0)
                goto err_pci_ef100;

        return 0;

err_pci_ef100:
        pci_unregister_driver(&efx_pci_driver);
err_pci:
        efx_destroy_reset_workqueue();
err_reset:
        unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
        return rc;
}

static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&ef100_pci_driver);
        pci_unregister_driver(&efx_pci_driver);
        efx_destroy_reset_workqueue();
        unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);