// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4

/* Dataptr callback for TX: return the DMA address of data buffer (dcb, db).
 * The TX data buffers live in the same contiguous DMA allocation as the DCBs,
 * immediately after the n_dcbs DCB structures (sparx5_fdma_xmit() uses the
 * same offset arithmetic to compute the matching virtual address).
 */
static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				     u64 *dataptr)
{
	*dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
		   ((dcb * fdma->n_dbs + db) * fdma->db_size);

	return 0;
}

/* Dataptr callback for RX: allocate a fresh skb for data buffer (dcb, db),
 * hand its physical data address to the FDMA and remember the skb in
 * rx->skb[dcb][db] so sparx5_fdma_rx_get_frame() can pass it up the stack.
 * Returns -ENOMEM if no skb could be allocated.
 */
static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				     u64 *dataptr)
{
	struct sparx5 *sparx5 = fdma->priv;
	struct sparx5_rx *rx = &sparx5->rx;
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	*dataptr = virt_to_phys(skb->data);

	rx->skb[dcb][db] = skb;

	return 0;
}

/* Program and start the RX (extraction) FDMA channel: point the hardware at
 * the DCB chain, configure DB count/interrupt mode, unblock extraction,
 * enable the channel DB interrupt and activate the channel.
 */
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(fdma->channel_id));
	spx5_wr(((u64)fdma->dma) >> 32, sparx5,
		FDMA_DCB_LLP1(fdma->channel_id));

	/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
		sparx5, FDMA_CH_CFG(fdma->channel_id));

	/* Set the RX Watermark to max */
	spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
		 sparx5,
		 FDMA_XTR_CFG);

	/* Start RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Enable RX channel DB interrupt */
	spx5_rmw(BIT(fdma->channel_id),
		 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Activate the RX channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

/* Stop the RX (extraction) FDMA channel: deactivate the channel, mask its DB
 * interrupt and block further extraction.
 */
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	/* Deactivate the RX channel */
	spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);

	/* Disable RX channel DB interrupt */
	spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Stop RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));
}

/* Program and start the TX (injection) FDMA channel. Mirrors
 * sparx5_fdma_rx_activate() but targets the injection queue and does not
 * enable a DB interrupt: TX completion is detected by polling the DCB status
 * in sparx5_fdma_xmit().
 */
static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	struct fdma *fdma = &tx->fdma;

	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(fdma->channel_id));
	spx5_wr(((u64)fdma->dma) >> 32, sparx5,
		FDMA_DCB_LLP1(fdma->channel_id));

	/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
		sparx5, FDMA_CH_CFG(fdma->channel_id));

	/* Start TX fdma */
	spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Activate the channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

/* Deactivate the TX (injection) FDMA channel. */
static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Disable the channel */
	spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);
}

/* Tell the hardware to re-read the DCB chain of @fdma's channel; used after
 * new DCBs have been armed, for both the RX and the TX channel.
 */
void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
	/* Reload the channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
}

/* Pass one completed RX data buffer up the network stack.
 *
 * Returns true when a frame was consumed (delivered via netif_receive_skb()),
 * false when the current DB is not yet done or the frame was discarded
 * because it arrived on an inactive port.
 */
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct fdma_db *db_hw;
	struct frame_info fi;
	struct sk_buff *skb;

	/* Check if the DCB is done */
	db_hw = fdma_db_next_get(fdma);
	if (unlikely(!fdma_db_is_done(db_hw)))
		return false;
	skb = rx->skb[fdma->dcb_index][fdma->db_index];
	skb_put(skb, fdma_db_len_get(db_hw));
	/* Now do the normal processing of the skb */
	sparx5_ifh_parse(sparx5, (u32 *)skb->data, &fi);
	/* Map the IFH source port to its netdev; out-of-range ports map to
	 * NULL and are dropped below.
	 */
	port = fi.src_port < sparx5->data->consts->n_ports ?
		       sparx5->ports[fi.src_port] :
		       NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, XTR_QUEUE);
		return false;
	}
	skb->dev = port->ndev;
	/* Strip the internal frame header before delivery */
	skb_pull(skb, IFH_LEN * sizeof(u32));
	/* Drop the trailing FCS unless the netdev asked to keep it */
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;
	rx->packets++;
	netif_receive_skb(skb);
	return true;
}

/* NAPI poll: consume up to @weight completed RX buffers, re-arming each DCB
 * once all its DBs are used. When less than the full budget was consumed the
 * poll is completed and the channel DB interrupt is re-enabled (it was masked
 * in sparx5_fdma_handler()). Returns the number of frames processed.
 */
int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	struct fdma *fdma = &rx->fdma;
	int counter = 0;

	while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
		fdma_db_advance(fdma);
		counter++;
		/* Check if the DCB can be reused */
		if (fdma_dcb_is_reusable(fdma))
			continue;
		/* All DBs of this DCB consumed: re-arm it and move on */
		fdma_dcb_add(fdma, fdma->dcb_index,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);
		fdma_db_reset(fdma);
		fdma_dcb_advance(fdma);
	}
	if (counter < weight) {
		napi_complete_done(&rx->napi, counter);
		/* Re-enable the RX channel DB interrupt */
		spx5_rmw(BIT(fdma->channel_id),
			 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);
	}
	if (counter)
		sparx5_fdma_reload(sparx5, fdma);
	return counter;
}

/* Inject one frame on the TX FDMA channel: copy the IFH followed by the frame
 * payload into the next TX data buffer and arm its DCB as a single-buffer
 * frame. Returns NETDEV_TX_OK on success, or -EINVAL when the next DCB is
 * still owned by the hardware (ring full).
 */
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
		     struct net_device *dev)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	void *virt_addr;

	fdma_dcb_advance(fdma);
	if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
		return -EINVAL;

	/* Get the virtual address of the dataptr for the next DB */
	virt_addr = ((u8 *)fdma->dcbs +
		     (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
		     ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));

	memcpy(virt_addr, ifh, IFH_LEN * 4);
	memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);

	/* Block length is IFH + payload + 4 extra bytes — presumably room for
	 * the FCS appended by hardware; TODO(review): confirm against the
	 * datasheet.
	 */
	fdma_dcb_add(fdma, fdma->dcb_index, 0,
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));

	sparx5_fdma_reload(sparx5, fdma);

	return NETDEV_TX_OK;
}

/* Allocate the contiguous DCB/DB memory for the RX channel and arm every DCB
 * with an interrupt-on-done status. Returns 0 or a negative errno.
 */
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct fdma *fdma = &rx->fdma;
	int err;

	err = fdma_alloc_phys(fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	return 0;
}

/* Allocate the contiguous DCB/DB memory for the TX channel. TX DCBs start
 * out marked DONE, i.e. available for sparx5_fdma_xmit(). Returns 0 or a
 * negative errno.
 */
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	int err;

	err = fdma_alloc_phys(fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_DONE);

	return 0;
}

/* Fill in the software state of the RX fdma instance (channel id, ring
 * geometry, callbacks) and pick a netdev for skb allocation and NAPI.
 */
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
				struct sparx5_rx *rx, int channel)
{
	struct fdma *fdma = &rx->fdma;
	int idx;

	fdma->channel_id = channel;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
	fdma->priv = sparx5;
	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
	fdma->size = fdma_get_size(&sparx5->rx.fdma);
	fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			rx->ndev = port->ndev;
			break;
		}
	}
}

/* Fill in the software state of the TX fdma instance. Unlike RX, the data
 * buffers are carved out of the same contiguous allocation as the DCBs
 * (fdma_get_size_contiguous(), sparx5_fdma_tx_dataptr_cb()).
 */
static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
				struct sparx5_tx *tx, int channel)
{
	struct fdma *fdma = &tx->fdma;

	fdma->channel_id = channel;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
	fdma->priv = sparx5;
	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
	fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
	fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}

/* FDMA interrupt handler: on a DB event, mask all DB interrupts, acknowledge
 * them and defer the work to NAPI; error interrupts are logged (rate limited)
 * and cleared.
 */
irqreturn_t sparx5_fdma_handler(int irq, void *args)
{
	struct sparx5 *sparx5 = args;
	u32 db = 0, err = 0;

	db = spx5_rd(sparx5, FDMA_INTR_DB);
	err = spx5_rd(sparx5, FDMA_INTR_ERR);
	/* Clear interrupt */
	if (db) {
		spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
		spx5_wr(db, sparx5, FDMA_INTR_DB);
		napi_schedule(&sparx5->rx.napi);
	}
	if (err) {
		u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);

		dev_err_ratelimited(sparx5->dev,
				    "ERR: int: %#x, type: %#x\n",
				    err, err_type);
		spx5_wr(err, sparx5, FDMA_INTR_ERR);
		spx5_wr(err_type, sparx5, FDMA_ERRORS);
	}
	return IRQ_HANDLED;
}

/* Switch the extraction/injection queues to FDMA mode and configure the
 * internal CPU ports (ASM/DSM/QFWD/HSCH) for IFH-prefixed frames.
 */
void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;
	int urgency;

	/* Change mode to fdma extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
	     portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
	     portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable port in queue system */
		urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500);
		spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
			 QFWD_SWITCH_PORT_MODE_PORT_ENA |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
			 sparx5,
			 QFWD_SWITCH_PORT_MODE(portno));

		/* Disable Disassembler buffer underrun watchdog
		 * to avoid truncated packets in XTR
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));

		/* Disabling frame aging */
		spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1),
			 HSCH_PORT_MODE_AGE_DIS,
			 sparx5,
			 HSCH_PORT_MODE(portno));
	}
}

/* One-time FDMA setup: reset the FDMA block, set up ACP caching, switch the
 * queues to FDMA mode and allocate both channel rings. Returns 0 or a
 * negative errno from the ring allocations.
 */
int sparx5_fdma_init(struct sparx5 *sparx5)
{
	int err;

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	/* Force ACP caching but disable read/write allocation */
	spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) |
		 CPU_PROC_CTRL_ACP_AWCACHE_SET(0) |
		 CPU_PROC_CTRL_ACP_ARCACHE_SET(0),
		 CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA |
		 CPU_PROC_CTRL_ACP_AWCACHE |
		 CPU_PROC_CTRL_ACP_ARCACHE,
		 sparx5, CPU_PROC_CTRL);

	sparx5_fdma_injection_mode(sparx5);
	sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
	sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
	err = sparx5_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
		return err;
	}
	err = sparx5_fdma_tx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
		return err;
	}
	return err;
}

/* Tear down FDMA: stop both channels and free the DCB/DB memory. */
int sparx5_fdma_deinit(struct sparx5 *sparx5)
{
	sparx5_fdma_stop(sparx5);
	fdma_free_phys(&sparx5->rx.fdma);
	fdma_free_phys(&sparx5->tx.fdma);

	return 0;
}

/* Read the current FDMA port control word; used as the poll accessor in
 * sparx5_fdma_stop().
 */
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}

/* Register NAPI and activate both FDMA channels. Always returns 0. */
int sparx5_fdma_start(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_tx *tx = &sparx5->tx;

	netif_napi_add_weight(rx->ndev,
			      &rx->napi,
			      ops->fdma_poll,
			      FDMA_WEIGHT);

	napi_enable(&rx->napi);

	sparx5_fdma_rx_activate(sparx5, rx);
	sparx5_fdma_tx_activate(sparx5, tx);

	return 0;
}

/* Disable NAPI, deactivate both channels and wait for the extraction buffer
 * to drain. Always returns 0 (the poll timeout is deliberately ignored).
 */
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct sparx5_tx *tx = &sparx5->tx;
	u32 val;

	napi_disable(&rx->napi);

	/* Stop the fdma and channel interrupts */
	sparx5_fdma_rx_deactivate(sparx5, rx);
	sparx5_fdma_tx_deactivate(sparx5, tx);

	/* Wait for the RX channel to stop.
	 * NOTE(review): the exit condition is XTR_BUF_IS_EMPTY == 0, i.e. the
	 * poll ends when the buffer reads as not-empty — verify the intended
	 * polarity of this field against the register documentation.
	 */
	read_poll_timeout(sparx5_fdma_port_ctrl, val,
			  FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
			  500, 10000, 0, sparx5);

	return 0;
}