// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */
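/*
 * For illustration only (not used by the code): an RX or TX-done queue
 * entry of 0x1fff8b42 decodes as NPE ID 2 (bits 0 -> 1), port ID 0
 * (bits 3 -> 4) and descriptor address 0x1fff8b40 (the entry with its low
 * five bits masked off), which is how queue_get_desc() and
 * eth_txdone_irq() below recover the descriptor pointer.
 */
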
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
#include <linux/types.h>

#define IXP4XX_ETH_NPEA		0x00
#define IXP4XX_ETH_NPEB		0x10
#define IXP4XX_ETH_NPEC		0x20

#include "ixp46x_ts.h"

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000

/* MRU is said to be 14320 in a code dump, the SW manual says that
 * MRU/MTU is 16320 and includes VLAN and ethernet headers.
 * See "IXP400 Software Programmer's Guide" section 10.3.2, page 161.
 *
 * FIXME: we have chosen the safe default (14320) but if you can test
 * jumboframes, experiment with 16320 and see what happens!
 */
#define MAX_MRU			(14320 - VLAN_ETH_HLEN)
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17

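/*
 * On big-endian ARM the NPEs can DMA frame data directly to and from
 * sk_buffs, so a buffer is simply an skb.  Little-endian kernels instead
 * go through an intermediate kmalloc()ed buffer whose contents are
 * byte-swapped one 32-bit word at a time (see memcpy_swab32() below).
 */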
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
	u8 rxq;		/* configurable, currently 0 - 31 only */
	u8 txreadyq;
	u8 hwaddr[ETH_ALEN];
	u8 npe;		/* NPE instance used by this interface */
	bool has_mdio;	/* If this instance has an MDIO bus */
};

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct eth_regs __iomem *regs;
	struct ixp46x_ts_regs __iomem *timesync_regs;
	int phc_index;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	dma_addr_t desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

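/*
 * The NPEs see memory as big-endian 32-bit words: struct desc above
 * reverses its fields within each word on little-endian kernels, and
 * frame data is converted with this helper.
 */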
#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static struct device_node *mdio_bus_np;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int ixp4xx_hwtstamp_set(struct net_device *netdev,
			       struct kernel_hwtstamp_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ret;
	int ch;

	if (!cpu_is_ixp46x())
		return -EOPNOTSUPP;

	if (!netif_running(netdev))
		return -EINVAL;

	ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
	if (ret)
		return ret;

	ch = PORT2CHANNEL(port);
	regs = port->timesync_regs;

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return 0;
}

static int ixp4xx_hwtstamp_get(struct net_device *netdev,
			       struct kernel_hwtstamp_config *cfg)
{
	struct port *port = netdev_priv(netdev);

	if (!cpu_is_ixp46x())
		return -EOPNOTSUPP;

	if (!netif_running(netdev))
		return -EINVAL;

	cfg->flags = 0;
	cfg->tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return 0;
}

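/*
 * All ports share the single MDIO controller in the NPE-B ETH-0 register
 * block (see CORE_MDC_EN above), so the command/status registers are
 * reached through the global mdio_regs pointer and serialized with
 * mdio_lock instead of going through each port's own registers.
 */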
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = regs;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	err = of_mdiobus_register(mdio_bus, mdio_bus_np);
	if (err)
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}


static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			netdev_info(dev, "link down\n");
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	netdev_info(dev, "link up, speed %u Mb/s, %s duplex\n",
		    port->speed, port->duplex ? "full" : "half");
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_dbg(dev, "%s(%i) ", func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}


static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}

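/*
 * RX path: the NPE takes empty descriptors from the port's RX-free queue,
 * fills them with received frames and pushes them onto the RX queue, which
 * raises the IRQ handled below.  eth_poll() then passes the frames to the
 * stack and recycles each descriptor back onto the RX-free queue with a
 * fresh buffer.
 */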
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_dbg(dev, "eth_poll\n");
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			netdev_dbg(dev, "eth_poll napi_complete\n");
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_schedule(napi)) { /* not empty again */
#if DEBUG_RX
				netdev_dbg(dev, "eth_poll napi_schedule succeeded\n");
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			netdev_dbg(dev, "eth_poll all done\n");
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-free queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_dbg(dev, "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}

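/*
 * TX path: eth_xmit() takes a free descriptor from the port's TX-ready
 * queue and pushes it onto the NPE's TX queue.  Once the frame has been
 * sent, the NPE returns the descriptor through the shared TX-done queue,
 * where eth_txdone_irq() frees the buffer and puts the descriptor back
 * onto the owning port's TX-ready queue.
 */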
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_dbg(dev, "eth_xmit\n");
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_dbg(dev, "eth_xmit queue full\n");
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			netdev_dbg(dev, "eth_xmit ready again\n");
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_dbg(dev, "eth_xmit end\n");
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}

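/*
 * The MAC's multicast filter is a single address/mask pair: a frame is
 * accepted if its destination matches mcast_addr on all bits set in
 * mcast_mask.  The code below clears the mask bits in which the subscribed
 * addresses differ, producing the tightest single filter covering the
 * whole list; any extra frames it lets through are dropped by the stack.
 */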
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}


/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strscpy(info->bus_info, "internal", sizeof(info->bus_info));
}

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct kernel_ethtool_ts_info *info)
{
	struct port *port = netdev_priv(dev);

	if (port->phc_index < 0)
		ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);

	info->phc_index = port->phc_index;

	if (info->phc_index < 0) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};


static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

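/*
 * Each port gets one coherent descriptor table from the DMA pool: RX_DESCS
 * RX descriptors followed by TX_DESCS TX descriptors, matching the layout
 * assumed by the rx_desc_*() and tx_desc_*() accessors (and sized by
 * POOL_ALLOC_SIZE).
 */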
static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
	if (!port->desc_tab)
		return -ENOMEM;
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

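/*
 * The NPE firmware takes the buffer size in 64-byte chunks and the maximum
 * frame length in bytes.  For example, new_mtu = 1500 gives framesize =
 * 1500 + VLAN_ETH_HLEN (18) = 1518 bytes and chunks = 24.
 */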
static int ixp4xx_do_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	int framesize, chunks;
	struct msg msg = {};

	/* adjust for ethernet headers */
	framesize = new_mtu + VLAN_ETH_HLEN;
	/* max rx/tx 64 byte chunks */
	chunks = DIV_ROUND_UP(framesize, 64);

	msg.cmd = NPE_SETMAXFRAMELENGTHS;
	msg.eth_id = port->id;

	/* Firmware wants to know buffer size in 64 byte chunks */
	msg.byte2 = chunks << 8;
	msg.byte3 = chunks << 8;

	msg.byte4 = msg.byte6 = framesize >> 8;
	msg.byte5 = msg.byte7 = framesize & 0xff;

	if (npe_send_recv_message(npe, &msg, "ETH_SET_MAX_FRAME_LENGTH"))
		return -EIO;
	netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n",
		   npe_name(npe), new_mtu);

	return 0;
}

static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	/* The NPE can only be told about a new frame size while the
	 * interface is up; eth_open() re-applies dev->mtu in any case.
	 */
	if (dev->flags & IFF_UP) {
		ret = ixp4xx_do_change_mtu(dev, new_mtu);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			netdev_err(dev, "%s not responding\n", npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	ixp4xx_do_change_mtu(dev, dev->mtu);

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

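/*
 * Closing a port has to reclaim every descriptor from the NPE: the port is
 * put into loopback and dummy one-byte frames are injected into the TX
 * queue so that buffers still held by the NPE keep flowing back through
 * the RX queue, where the loops below collect them.  MAX_CLOSE_WAIT bounds
 * the wait in both directions.
 */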
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		netdev_crit(dev, "unable to enable loopback\n");

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
			    buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_dbg(dev, "draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
			    buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_dbg(dev, "draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		netdev_crit(dev, "unable to disable loopback\n");

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_change_mtu = ixp4xx_eth_change_mtu,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_hwtstamp_get = ixp4xx_hwtstamp_get,
	.ndo_hwtstamp_set = ixp4xx_hwtstamp_set,
};

static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args queue_spec;
	struct of_phandle_args npe_spec;
	struct device_node *mdio_np;
	struct eth_plat_info *plat;
	u8 mac[ETH_ALEN];
	int ret;

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return NULL;

	ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
					       &npe_spec);
	if (ret) {
		dev_err(dev, "no NPE engine specified\n");
		return NULL;
	}
	/* NPE ID 0x00, 0x10, 0x20... */
	plat->npe = (npe_spec.args[0] << 4);

	/* Check if this device has an MDIO bus */
	mdio_np = of_get_child_by_name(np, "mdio");
	if (mdio_np) {
		plat->has_mdio = true;
		mdio_bus_np = mdio_np;
		/* DO NOT put the mdio_np, it will be used */
	}

	/* Get the rx queue as a resource from queue manager */
	ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
					       &queue_spec);
	if (ret) {
		dev_err(dev, "no rx queue phandle\n");
		return NULL;
	}
	plat->rxq = queue_spec.args[0];

	/* Get the txready queue as resource from queue manager */
	ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
					       &queue_spec);
	if (ret) {
		dev_err(dev, "no txready queue phandle\n");
		return NULL;
	}
	plat->txreadyq = queue_spec.args[0];

	ret = of_get_mac_address(np, mac);
	if (!ret) {
		dev_info(dev, "Setting macaddr from DT %pM\n", mac);
		memcpy(plat->hwaddr, mac, ETH_ALEN);
	}

	return plat;
}

static int ixp4xx_eth_probe(struct platform_device *pdev)
{
	struct phy_device *phydev = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct eth_plat_info *plat;
	struct net_device *ndev;
	struct port *port;
	int err;

	plat = ixp4xx_of_get_platdata(dev);
	if (!plat)
		return -ENODEV;

	if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, dev);
	port = netdev_priv(ndev);
	port->netdev = ndev;
	port->id = plat->npe;
	port->phc_index = -1;

	/* Get the port resource and remap */
	port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(port->regs))
		return PTR_ERR(port->regs);

	/* Register the MDIO bus if we have it */
	if (plat->has_mdio) {
		err = ixp4xx_mdio_register(port->regs);
		if (err) {
			dev_err(dev, "failed to register MDIO bus\n");
			return err;
		}
	}
	/* If the instance with the MDIO bus has not yet appeared,
	 * defer probing until it gets probed.
	 */
	if (!mdio_bus)
		return -EPROBE_DEFER;

	ndev->netdev_ops = &ixp4xx_netdev_ops;
	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
	ndev->tx_queue_len = 100;
	/* Inherit the DMA masks from the platform device */
	ndev->dev.dma_mask = dev->dma_mask;
	ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;

	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = MAX_MRU;

	netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id))))
		return -EIO;

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	if (is_valid_ether_addr(plat->hwaddr))
		eth_hw_addr_set(ndev, plat->hwaddr);
	else
		eth_hw_addr_random(ndev);

	platform_set_drvdata(pdev, ndev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
	if (!phydev) {
		err = -ENODEV;
		dev_err(dev, "no phydev\n");
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(ndev)))
		goto err_phy_dis;

	netdev_info(ndev, "MII PHY %s on %s\n", phydev_name(phydev),
		    npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	return err;
}

static void ixp4xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct phy_device *phydev = ndev->phydev;
	struct port *port = netdev_priv(ndev);

	unregister_netdev(ndev);
	phy_disconnect(phydev);
	ixp4xx_mdio_remove();
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
}

static const struct of_device_id ixp4xx_eth_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ethernet",
	},
	{ },
};

static struct platform_driver ixp4xx_eth_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
	},
	.probe = ixp4xx_eth_probe,
	.remove = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");