// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) Siemens AG, 2024
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "../k3-cppi-desc-pool.h"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | \
				 NETIF_MSG_TIMER | \
				 NETIF_MSG_IFDOWN | \
				 NETIF_MSG_IFUP | \
				 NETIF_MSG_RX_ERR | \
				 NETIF_MSG_TX_ERR | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_INTR | \
				 NETIF_MSG_TX_DONE | \
				 NETIF_MSG_RX_STATUS | \
				 NETIF_MSG_PKTDATA | \
				 NETIF_MSG_HW | \
				 NETIF_MSG_WOL)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)

void prueth_cleanup_rx_chns(struct prueth_emac *emac,
			    struct prueth_rx_chn *rx_chn,
			    int max_rflows)
{
	if (rx_chn->pg_pool) {
		page_pool_destroy(rx_chn->pg_pool);
		rx_chn->pg_pool = NULL;
	}

	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);

void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);

void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);

void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
		      struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	struct prueth_swdata *swdata;
	struct page *page;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	swdata = cppi5_hdesc_get_swdata(desc);
	if (swdata->type == PRUETH_SWDATA_PAGE) {
		page = swdata->data.page;
		page_pool_recycle_direct(page->pp, swdata->data.page);
		goto free_desc;
	}

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
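
		/* Chained buffer descriptors carry skb fragments mapped as
		 * pages in icssg_ndo_start_xmit(), so unmap them as pages
		 * here.
		 */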
		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

free_desc:
	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);

int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
			     int budget, bool *tdown)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			*tdown = true;
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		switch (swdata->type) {
		case PRUETH_SWDATA_SKB:
			skb = swdata->data.skb;
			dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
			total_bytes += skb->len;
			napi_consume_skb(skb, budget);
			break;
		case PRUETH_SWDATA_XDPF:
			xdpf = swdata->data.xdpf;
			dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
			total_bytes += xdpf->len;
			xdp_return_frame(xdpf);
			break;
		default:
			netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
			prueth_xmit_free(tx_chn, desc_tx);
			ndev->stats.tx_dropped++;
			continue;
		}

		prueth_xmit_free(tx_chn, desc_tx);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}

static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
{
	struct prueth_tx_chn *tx_chns =
			container_of(timer, struct prueth_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	bool tdown = false;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
						  &tdown);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets)) {
		if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL_PINNED);
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);

int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}
		tx_chn->irq = ret;

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);

int prueth_init_rx_chns(struct prueth_emac *emac,
			struct prueth_rx_chn *rx_chn,
			char *name, u32 max_rflows,
			u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;
	int flow_id_base;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
		emac->rx_mgm_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
	} else {
		emac->rx_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
	}

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (ret < 0) {
			netdev_err(ndev, "Failed to get rx dma irq");
			goto fail;
		}
		rx_chn->irq[i] = ret;
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);

int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
			      struct prueth_rx_chn *rx_chn,
			      struct page *page, u32 buf_len)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->type = PRUETH_SWDATA_PAGE;
	swdata->data.page = page;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
					desc_rx, desc_dma);
}
EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);

u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
	u64 ns;

	iepcount_lo = lo & GENMASK(19, 0);
	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
	hi_rollover_count = hi >> 11;

	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
	ns = ns * cycle_time_ns + iepcount_lo;

	return ns;
}
EXPORT_SYMBOL_GPL(icssg_ts_to_ns);

void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	if (emac->is_sr1) {
		ns = (u64)psdata[1] << 32 | psdata[0];
	} else {
		u32 hi_sw = readl(emac->prueth->shram.va +
				  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
				    IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

/**
 * emac_xmit_xdp_frame - transmits an XDP frame
 * @emac: emac device
 * @xdpf: data to transmit
 * @page: page from page pool if already DMA mapped
 * @q_idx: queue id
 *
 * Return: XDP state
 */
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
			struct xdp_frame *xdpf,
			struct page *page,
			unsigned int q_idx)
{
	struct cppi5_host_desc_t *first_desc;
	struct net_device *ndev = emac->ndev;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	struct prueth_swdata *swdata;
	u32 *epib;
	int ret;

	if (q_idx >= PRUETH_MAX_TX_QUEUES) {
		netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	tx_chn = &emac->tx_chns[q_idx];

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
		return ICSSG_XDP_CONSUMED;	/* drop */
	}

	if (page) { /* already DMA mapped by page_pool */
		buf_dma = page_pool_get_dma_addr(page);
		buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
	} else { /* Map the linear buffer */
		buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "xdp tx: failed to map data buffer\n");
			goto drop_free_descs;	/* drop */
		}
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation
	 */
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	if (page) {
		swdata->type = PRUETH_SWDATA_PAGE;
		swdata->data.page = page;
	} else {
		swdata->type = PRUETH_SWDATA_XDPF;
		swdata->data.xdpf = xdpf;
	}

	cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);

	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	return ICSSG_XDP_TX;

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);
	return ICSSG_XDP_CONSUMED;
}
EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);

/**
 * emac_run_xdp - run an XDP program
 * @emac: emac device
 * @xdp: XDP buffer containing the frame
 * @page: page with RX data if already DMA mapped
 * @len: Rx descriptor packet length
 *
 * Return: XDP state
 */
static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
			struct page *page, u32 *len)
{
	struct net_device *ndev = emac->ndev;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 pkt_len = *len;
	u32 act, result;
	int q_idx, err;

	xdp_prog = READ_ONCE(emac->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICSSG_XDP_PASS;
	case XDP_TX:
		/* Send packet to TX ring for immediate transmission */
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		q_idx = smp_processor_id() % emac->tx_ch_num;
		result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
		if (result == ICSSG_XDP_CONSUMED) {
			ndev->stats.tx_dropped++;
			goto drop;
		}

		dev_sw_netstats_rx_add(ndev, xdpf->len);
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
		if (err)
			goto drop;

		dev_sw_netstats_rx_add(ndev, pkt_len);
		return ICSSG_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
drop:
		trace_xdp_exception(emac->ndev, xdp_prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
		return ICSSG_XDP_CONSUMED;
	}
}

static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	dma_addr_t desc_dma, buf_dma;
	struct page *page, *new_page;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	u32 *psdata;
	void *pa;
	int ret;

	*xdp_state = 0;
	pool = rx_chn->pg_pool;
	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type != PRUETH_SWDATA_PAGE) {
		netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		return 0;
	}

	page = swdata->data.page;
	page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with old page to prevent a stall
	 */
	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	pa = page_address(page);
	if (emac->xdp_prog) {
		xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
		xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);

		*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
		if (*xdp_state == ICSSG_XDP_PASS)
			skb = xdp_build_skb_from_buff(&xdp);
		else
			goto requeue;
	} else {
		/* prepare skb and send to n/w stack */
		skb = napi_build_skb(pa, PAGE_SIZE);
	}

	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, PRUETH_HEADROOM);
	skb_put(skb, pkt_len);
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	if (emac->prueth->is_switch_mode)
		skb->offload_fwd_mark = emac->offload_fwd_mark;
	skb->protocol = eth_type_trans(skb, ndev);

	skb_mark_for_recycle(skb);
	napi_gro_receive(&emac->napi_rx, skb);
	ndev->stats.rx_bytes += pkt_len;
	ndev->stats.rx_packets++;

requeue:
	/* queue another RX DMA */
	ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
					PRUETH_MAX_PKT_SIZE);
	if (WARN_ON(ret < 0)) {
		page_pool_recycle_direct(pool, new_page);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}
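
/* Channel-reset callback passed to k3_udma_glue_reset_rx_chn(): recycle the
 * page attached to each in-flight descriptor and return the descriptor to
 * the pool.
 */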
static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct prueth_swdata *swdata;
	struct page_pool *pool;
	struct page *page;

	pool = rx_chn->pg_pool;
	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	if (swdata->type == PRUETH_SWDATA_PAGE) {
		page = swdata->data.page;
		page_pool_recycle_direct(pool, page);
	}

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}

static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
	int i;

	/* search and get the next free slot */
	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (!emac->tx_ts_skb[i]) {
			emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
			return i;
		}
	}

	return -EBUSY;
}

/**
 * icssg_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet - we queue the packet in
 * the EMAC hardware transmit queue. Doesn't wait for completion; TX
 * completion is handled in emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct netdev_queue *netif_txq;
	struct prueth_swdata *swdata;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	u32 pkt_len, dst_tag_id;
	int i, ret = 0, q_idx;
	bool in_tx_ts = 0;
	int tx_ts_cookie;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    emac->tx_ts_enabled) {
		tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
		if (tx_ts_cookie >= 0) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* Request TX timestamp */
			epib[0] = (u32)tx_ts_cookie;
			epib[1] = 0x80000000;	/* TX TS request */
			emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
			in_tx_ts = 1;
		}
	}

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation and port num 0
	 * for undirected packets in case of HSR offload mode
	 */
	dst_tag_id = emac->port_id | (q_idx << 8);

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_DUP))
		dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;

	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	swdata->type = PRUETH_SWDATA_SKB;
	swdata->data.skb = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy_cleanup_tx_ts;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto cleanup_tx_ts;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);	/* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	if (in_tx_ts)
		atomic_inc(&emac->tx_ts_pending);

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy_cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}
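	/* Free the partially built descriptor chain; the queue is stopped
	 * below until completed descriptors are reclaimed.
	 */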
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);

static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct prueth_swdata *swdata;
	struct xdp_frame *xdpf;
	struct sk_buff *skb;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);

	switch (swdata->type) {
	case PRUETH_SWDATA_SKB:
		skb = swdata->data.skb;
		dev_kfree_skb_any(skb);
		break;
	case PRUETH_SWDATA_XDPF:
		xdpf = swdata->data.xdpf;
		xdp_return_frame(xdpf);
		break;
	default:
		break;
	}

	prueth_xmit_free(tx_chn, desc_tx);
}

irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (emac->tx_ts_skb[i]) {
			dev_kfree_skb_any(emac->tx_ts_skb[i]);
			emac->tx_ts_skb[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);

int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = emac->is_sr1 ?
			PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
	int flow = emac->is_sr1 ?
			PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
	int xdp_state_or = 0;
	int num_rx = 0;
	int cur_budget;
	u32 xdp_state;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow, &xdp_state);
			xdp_state_or |= xdp_state;
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (xdp_state_or & ICSSG_XDP_REDIR)
		xdp_do_flush();

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (unlikely(emac->rx_pace_timeout_ns)) {
			hrtimer_start(&emac->rx_hrtimer,
				      ns_to_ktime(emac->rx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(emac->rx_chns.irq[rx_flow]);
		}
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);

static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
						 struct device *dma_dev,
						 int size)
{
	struct page_pool_params pp_params = { 0 };
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = size;
	pp_params.nid = dev_to_node(emac->prueth->dev);
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = dma_dev;
	pp_params.napi = &emac->napi_rx;
	pp_params.max_len = PAGE_SIZE;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		netdev_err(emac->ndev, "cannot create rx page pool\n");

	return pool;
}
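
/* Create the RX page pool and pre-fill the free-descriptor queue with one
 * mapped page per descriptor so the channel can receive as soon as it is
 * enabled.
 */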
int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
{
	struct page_pool *pool;
	struct page *page;
	int i, ret;

	pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	chn->pg_pool = pool;

	for (i = 0; i < chn->descs_num; i++) {
		/* NOTE: we're not using memory efficiently here.
		 * 1 full page (4KB?) used here instead of
		 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
		 */
		page = page_pool_dev_alloc_pages(pool);
		if (!page) {
			netdev_err(emac->ndev, "couldn't allocate rx page\n");
			ret = -ENOMEM;
			goto recycle_alloc_pg;
		}

		ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit page for rx chan %s ret %d\n",
				   chn->name, ret);
			page_pool_recycle_direct(pool, page);
			goto recycle_alloc_pg;
		}
	}

	return 0;

recycle_alloc_pg:
	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);

	return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);

void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
			  bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}
EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);

void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
			  int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);

void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}
EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);

static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		emac->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		emac->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		emac->rx_ts_enabled = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
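
/* Report the currently programmed hardware timestamping configuration
 * (SIOCGHWTSTAMP).
 */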
static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return emac_get_ts_config(ndev, ifr);
	case SIOCSHWTSTAMP:
		return emac_set_ts_config(ndev, ifr);
	default:
		break;
	}

	return phy_do_ioctl(ndev, ifr, cmd);
}
EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);

void icssg_ndo_get_stats64(struct net_device *ndev,
			   struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors = ndev->stats.rx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_errors = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);

int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				 size_t len)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret;

	ret = snprintf(name, len, "p%d", emac->port_id);
	if (ret >= len)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);

/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_port);

/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return ret;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_mac);

void prueth_netdev_exit(struct prueth *prueth,
			struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);
	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}
EXPORT_SYMBOL_GPL(prueth_netdev_exit);

int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = is_sr1 ? 2 : 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	if (is_sr1)
		return 0;

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(prueth_get_cores);

void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_put_cores);

#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			ret = ndev->netdev_ops->ndo_stop(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to stop: %d", ret);
				return ret;
			}
		}
	}

	return 0;
}

static int prueth_resume(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			ret = ndev->netdev_ops->ndo_open(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to start: %d", ret);
				return ret;
			}
			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

const struct dev_pm_ops prueth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};
EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);

MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
MODULE_LICENSE("GPL");