Lines matching refs: lan966x

12 struct lan966x *lan966x = (struct lan966x *)fdma->priv;
13 struct lan966x_rx *rx = &lan966x->rx;
29 struct lan966x *lan966x = (struct lan966x *)fdma->priv;
31 *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;
39 struct lan966x *lan966x = (struct lan966x *)fdma->priv;
41 *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr + XDP_PACKET_HEADROOM;
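
The three fragments above (lines 12-41) come from the FDMA dataptr callbacks: each recovers the driver context stashed in fdma->priv, and the TX variants return the DMA address of the per-DCB buffer, offset by XDP_PACKET_HEADROOM in the XDP case. A minimal sketch of the plain TX callback, assuming the (fdma, dcb, db, dataptr) callback signature of the shared fdma library:

    static int lan966x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
                                          u64 *dataptr)
    {
            /* Driver context was stored in fdma->priv at init time */
            struct lan966x *lan966x = (struct lan966x *)fdma->priv;

            *dataptr = lan966x->tx.dcbs_buf[dcb].dma_addr;

            return 0;
    }
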
46 static int lan966x_fdma_channel_active(struct lan966x *lan966x)
48 return lan_rd(lan966x, FDMA_CH_ACTIVE);
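
lan966x_fdma_channel_active() just returns the FDMA_CH_ACTIVE register; the disable paths at lines 179 and 279 use it as the read callback for readx_poll_timeout_atomic(), spinning until the channel's bit clears. A sketch of that pattern (the READL_SLEEP_US/READL_TIMEOUT_US constants are assumed driver-local names):

    u32 val;

    /* Request channel shutdown, then wait for the hardware to ack it */
    lan_wr(BIT(fdma->channel_id), lan966x, FDMA_CH_DISABLE);

    readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                              val, !(val & BIT(fdma->channel_id)),
                              READL_SLEEP_US, READL_TIMEOUT_US);
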
77 struct lan966x *lan966x = rx->lan966x;
83 .dev = lan966x->dev,
90 if (lan966x_xdp_present(lan966x))
95 for (int i = 0; i < lan966x->num_phys_ports; ++i) {
98 if (!lan966x->ports[i])
101 port = lan966x->ports[i];
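
Lines 77-101 belong to the RX page pool setup: the pool is tied to lan966x->dev, the DMA direction is widened when an XDP program is attached (line 90), and the loop over non-NULL ports (lines 95-101) wires each port's XDP RX queue to the pool. A sketch of the parameter block, with the field values given as plausible assumptions rather than the driver's exact choices:

    struct page_pool_params pp_params = {
            .order     = rx->page_order,
            .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
            .pool_size = fdma->n_dcbs,
            .nid       = NUMA_NO_NODE,
            .dev       = lan966x->dev,
            .dma_dir   = DMA_FROM_DEVICE,
            .offset    = XDP_PACKET_HEADROOM,
    };

    /* XDP may rewrite and retransmit a buffer, so map it both ways */
    if (lan966x_xdp_present(lan966x))
            pp_params.dma_dir = DMA_BIDIRECTIONAL;

    rx->page_pool = page_pool_create(&pp_params);
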
112 struct lan966x *lan966x = rx->lan966x;
119 err = fdma_alloc_coherent(lan966x->dev, fdma);
131 struct lan966x *lan966x = rx->lan966x;
138 lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
140 lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
147 lan966x, FDMA_CH_CFG(fdma->channel_id));
152 lan966x, FDMA_PORT_CTRL(0));
155 mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
160 lan966x, FDMA_INTR_DB_ENA);
165 lan966x, FDMA_CH_ACTIVATE);
170 struct lan966x *lan966x = rx->lan966x;
177 lan966x, FDMA_CH_DISABLE);
179 readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
185 lan966x, FDMA_CH_DB_DISCARD);
190 struct lan966x *lan966x = rx->lan966x;
194 lan966x, FDMA_CH_RELOAD);
199 struct lan966x *lan966x = tx->lan966x;
208 err = fdma_alloc_coherent(lan966x->dev, fdma);
223 struct lan966x *lan966x = tx->lan966x;
226 fdma_free_coherent(lan966x->dev, &tx->fdma);
231 struct lan966x *lan966x = tx->lan966x;
238 lan_wr(lower_32_bits((u64)fdma->dma), lan966x,
240 lan_wr(upper_32_bits((u64)fdma->dma), lan966x,
247 lan966x, FDMA_CH_CFG(fdma->channel_id));
252 lan966x, FDMA_PORT_CTRL(0));
255 mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
260 lan966x, FDMA_INTR_DB_ENA);
265 lan966x, FDMA_CH_ACTIVATE);
270 struct lan966x *lan966x = tx->lan966x;
277 lan966x, FDMA_CH_DISABLE);
279 readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
285 lan966x, FDMA_CH_DB_DISCARD);
292 struct lan966x *lan966x = tx->lan966x;
297 lan966x, FDMA_CH_RELOAD);
300 static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
305 for (i = 0; i < lan966x->num_phys_ports; ++i) {
306 port = lan966x->ports[i];
315 static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
320 for (i = 0; i < lan966x->num_phys_ports; ++i) {
321 port = lan966x->ports[i];
329 static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
331 struct lan966x_tx *tx = &lan966x->tx;
332 struct lan966x_rx *rx = &lan966x->rx;
343 spin_lock_irqsave(&lan966x->tx_lock, flags);
359 dma_unmap_single(lan966x->dev,
368 dma_unmap_single(lan966x->dev,
386 lan966x_fdma_wakeup_netdev(lan966x);
388 spin_unlock_irqrestore(&lan966x->tx_lock, flags);
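
lan966x_fdma_tx_clear_buf() (lines 329-388) reclaims completed TX buffers: it takes tx_lock with interrupts disabled, unmaps each finished buffer (the two dma_unmap_single() calls at lines 359 and 368 cover the skb and XDP cases), and wakes the netdev queues before unlocking. A condensed sketch of the locking shape; the per-buffer fields and the hardware completion test are assumptions:

    unsigned long flags;
    int i;

    spin_lock_irqsave(&lan966x->tx_lock, flags);
    for (i = 0; i < fdma->n_dcbs; ++i) {
            struct lan966x_tx_dcb_buf *dcb_buf = &tx->dcbs_buf[i];

            if (!dcb_buf->used)     /* assumed "in flight" flag */
                    continue;
            /* hardware-completion check elided */

            dma_unmap_single(lan966x->dev, dcb_buf->dma_addr,
                             dcb_buf->len, DMA_TO_DEVICE);
            dcb_buf->used = false;
    }
    lan966x_fdma_wakeup_netdev(lan966x);
    spin_unlock_irqrestore(&lan966x->tx_lock, flags);
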
393 struct lan966x *lan966x = rx->lan966x;
404 dma_sync_single_for_cpu(lan966x->dev,
411 if (WARN_ON(*src_port >= lan966x->num_phys_ports))
414 port = lan966x->ports[*src_port];
424 struct lan966x *lan966x = rx->lan966x;
446 skb->dev = lan966x->ports[src_port]->dev;
452 lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
455 if (lan966x->bridge_mask & BIT(src_port)) {
459 if (!lan966x_hw_offload(lan966x, src_port, skb))
476 struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
477 struct lan966x_rx *rx = &lan966x->rx;
486 lan966x_fdma_tx_clear_buf(lan966x, weight);
519 napi_gro_receive(&lan966x->napi, skb);
539 lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
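
The NAPI poll (lines 476-539) first reclaims TX buffers (line 486), then pulls received frames and hands them to GRO (line 519); once the budget is no longer exhausted it re-arms the doorbell interrupts by writing 0xff to FDMA_INTR_DB_ENA (line 539). A sketch of the completion tail, assuming the usual napi_complete_done() contract:

    if (work_done < weight) {
            napi_complete_done(napi, work_done);
            /* Re-enable doorbell interrupts for all channels */
            lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
    }

    return work_done;
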
546 struct lan966x *lan966x = args;
549 db = lan_rd(lan966x, FDMA_INTR_DB);
550 err = lan_rd(lan966x, FDMA_INTR_ERR);
553 lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
554 lan_wr(db, lan966x, FDMA_INTR_DB);
556 napi_schedule(&lan966x->napi);
560 err_type = lan_rd(lan966x, FDMA_ERRORS);
564 lan_wr(err, lan966x, FDMA_INTR_ERR);
565 lan_wr(err_type, lan966x, FDMA_ERRORS);
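
Lines 546-565 outline the interrupt handler: read the doorbell and error status, and on a doorbell mask further doorbell interrupts (write 0 to FDMA_INTR_DB_ENA), ack the pending bits, and kick NAPI; errors are acknowledged through FDMA_INTR_ERR and FDMA_ERRORS. Reassembled from the matched lines, with the surrounding control flow as an assumption:

    static irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
    {
            struct lan966x *lan966x = args;
            u32 db, err, err_type;

            db = lan_rd(lan966x, FDMA_INTR_DB);
            err = lan_rd(lan966x, FDMA_INTR_ERR);

            if (db) {
                    lan_wr(0, lan966x, FDMA_INTR_DB_ENA);  /* mask doorbells */
                    lan_wr(db, lan966x, FDMA_INTR_DB);     /* ack pending */
                    napi_schedule(&lan966x->napi);
            }

            if (err) {
                    err_type = lan_rd(lan966x, FDMA_ERRORS);
                    lan_wr(err, lan966x, FDMA_INTR_ERR);
                    lan_wr(err_type, lan966x, FDMA_ERRORS);
            }

            return IRQ_HANDLED;
    }
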
589 struct lan966x *lan966x = tx->lan966x;
591 if (likely(lan966x->tx.activated)) {
595 lan966x->tx.activated = true;
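
Lines 589-595 show the lazy TX channel start: the common case reloads an already-active channel, and only the first transmit flips tx.activated and activates it. A sketch, with the two helper names as assumptions:

    if (likely(lan966x->tx.activated)) {
            /* Channel is live; just tell it new DCBs are queued */
            lan966x_fdma_tx_reload(tx);
    } else {
            /* First frame: bring the channel up once */
            lan966x->tx.activated = true;
            lan966x_fdma_tx_activate(tx);
    }
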
602 struct lan966x *lan966x = port->lan966x;
604 struct lan966x_tx *tx = &lan966x->tx;
612 spin_lock(&lan966x->tx_lock);
639 dma_addr = dma_map_single(lan966x->dev,
643 if (dma_mapping_error(lan966x->dev, dma_addr)) {
659 dma_sync_single_for_device(lan966x->dev,
691 spin_unlock(&lan966x->tx_lock);
699 struct lan966x *lan966x = port->lan966x;
701 struct lan966x_tx *tx = &lan966x->tx;
738 dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
740 if (dma_mapping_error(lan966x->dev, dma_addr)) {
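
Both transmit paths map the payload before queueing a DCB: the XDP path maps the frame data and syncs it for the device (lines 639-659), while the skb path maps skb->data for skb->len bytes (line 738); each bails out on dma_mapping_error(). The standard shape of that check, with the drop accounting left abstract:

    dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
                              DMA_TO_DEVICE);
    if (dma_mapping_error(lan966x->dev, dma_addr)) {
            /* Mapping failed: count the drop, free the skb (elided) */
            return NETDEV_TX_OK;
    }
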
784 static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
789 for (i = 0; i < lan966x->num_phys_ports; ++i) {
793 port = lan966x->ports[i];
797 mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
805 static int lan966x_qsys_sw_status(struct lan966x *lan966x)
807 return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
810 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
817 memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
818 page_pool = lan966x->rx.page_pool;
820 napi_synchronize(&lan966x->napi);
821 napi_disable(&lan966x->napi);
822 lan966x_fdma_stop_netdev(lan966x);
824 lan966x_fdma_rx_disable(&lan966x->rx);
825 lan966x_fdma_rx_free_pages(&lan966x->rx);
826 lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
827 lan966x->rx.max_mtu = new_mtu;
828 err = lan966x_fdma_rx_alloc(&lan966x->rx);
831 lan966x_fdma_rx_start(&lan966x->rx);
833 fdma_free_coherent(lan966x->dev, &fdma_rx_old);
837 lan966x_fdma_wakeup_netdev(lan966x);
838 napi_enable(&lan966x->napi);
842 lan966x->rx.page_pool = page_pool;
843 memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
844 lan966x_fdma_rx_start(&lan966x->rx);
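
lan966x_fdma_reload() (lines 810-844) swaps the RX ring for a new MTU: it snapshots the old fdma state and page pool, quiesces NAPI and the ports, tears down RX, recomputes page_order from the new MTU (line 826), then allocates and starts a fresh ring; only after that succeeds is the old ring's coherent memory freed. On allocation failure the saved page pool and fdma state are copied back and the old ring restarted (lines 842-844). A condensed sketch of that rollback structure:

    memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma));
    page_pool = lan966x->rx.page_pool;

    /* ... disable RX, free pages, set new page_order/max_mtu ... */

    err = lan966x_fdma_rx_alloc(&lan966x->rx);
    if (err)
            goto restore;

    lan966x_fdma_rx_start(&lan966x->rx);
    fdma_free_coherent(lan966x->dev, &fdma_rx_old);
    /* ... wake the ports, napi_enable ... */
    return 0;

    restore:
    lan966x->rx.page_pool = page_pool;
    memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma));
    lan966x_fdma_rx_start(&lan966x->rx);
    return err;
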
849 static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
851 return lan966x_fdma_get_max_mtu(lan966x) +
858 static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
866 lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
869 readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
878 err = lan966x_fdma_reload(lan966x, max_mtu);
883 lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
888 int lan966x_fdma_change_mtu(struct lan966x *lan966x)
892 max_mtu = lan966x_fdma_get_max_frame(lan966x);
893 if (max_mtu == lan966x->rx.max_mtu)
896 return __lan966x_fdma_reload(lan966x, max_mtu);
899 int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
903 max_mtu = lan966x_fdma_get_max_frame(lan966x);
904 return __lan966x_fdma_reload(lan966x, max_mtu);
907 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
909 if (lan966x->fdma_ndev)
912 lan966x->fdma_ndev = dev;
913 netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
914 napi_enable(&lan966x->napi);
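
Lines 907-914 reconstruct almost completely: a single net_device hosts the shared NAPI context, so the first caller wins and later calls return early. Assembled from the matched lines:

    void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
    {
            /* Only one netdev owns the shared NAPI instance */
            if (lan966x->fdma_ndev)
                    return;

            lan966x->fdma_ndev = dev;
            netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
            napi_enable(&lan966x->napi);
    }
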
917 void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
919 if (lan966x->fdma_ndev == dev) {
920 netif_napi_del(&lan966x->napi);
921 lan966x->fdma_ndev = NULL;
925 int lan966x_fdma_init(struct lan966x *lan966x)
929 if (!lan966x->fdma)
932 lan966x->rx.lan966x = lan966x;
933 lan966x->rx.fdma.channel_id = FDMA_XTR_CHANNEL;
934 lan966x->rx.fdma.n_dcbs = FDMA_DCB_MAX;
935 lan966x->rx.fdma.n_dbs = FDMA_RX_DCB_MAX_DBS;
936 lan966x->rx.fdma.priv = lan966x;
937 lan966x->rx.fdma.size = fdma_get_size(&lan966x->rx.fdma);
938 lan966x->rx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
939 lan966x->rx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
940 lan966x->rx.fdma.ops.dataptr_cb = &lan966x_fdma_rx_dataptr_cb;
941 lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
942 lan966x->tx.lan966x = lan966x;
943 lan966x->tx.fdma.channel_id = FDMA_INJ_CHANNEL;
944 lan966x->tx.fdma.n_dcbs = FDMA_DCB_MAX;
945 lan966x->tx.fdma.n_dbs = FDMA_TX_DCB_MAX_DBS;
946 lan966x->tx.fdma.priv = lan966x;
947 lan966x->tx.fdma.size = fdma_get_size(&lan966x->tx.fdma);
948 lan966x->tx.fdma.db_size = PAGE_SIZE << lan966x->rx.page_order;
949 lan966x->tx.fdma.ops.nextptr_cb = &fdma_nextptr_cb;
950 lan966x->tx.fdma.ops.dataptr_cb = &lan966x_fdma_tx_dataptr_cb;
952 err = lan966x_fdma_rx_alloc(&lan966x->rx);
956 err = lan966x_fdma_tx_alloc(&lan966x->tx);
958 fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
962 lan966x_fdma_rx_start(&lan966x->rx);
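
The init tail (lines 952-962) allocates RX first, then TX, and unwinds the RX coherent allocation if TX allocation fails before starting the RX channel. Reassembled, with the early-return flow as an assumption:

    err = lan966x_fdma_rx_alloc(&lan966x->rx);
    if (err)
            return err;

    err = lan966x_fdma_tx_alloc(&lan966x->tx);
    if (err) {
            /* Unwind the RX ring allocated above */
            fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
            return err;
    }

    lan966x_fdma_rx_start(&lan966x->rx);

    return 0;
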
967 void lan966x_fdma_deinit(struct lan966x *lan966x)
969 if (!lan966x->fdma)
972 lan966x_fdma_rx_disable(&lan966x->rx);
973 lan966x_fdma_tx_disable(&lan966x->tx);
975 napi_synchronize(&lan966x->napi);
976 napi_disable(&lan966x->napi);
978 lan966x_fdma_rx_free_pages(&lan966x->rx);
979 fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
980 page_pool_destroy(lan966x->rx.page_pool);
981 lan966x_fdma_tx_free(&lan966x->tx);
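
The teardown at lines 967-981 reads straight through: bail out if FDMA is unused, stop both channels, quiesce and disable NAPI, then release the RX pages, the coherent ring, the page pool, and the TX ring, in that order. As a whole function:

    void lan966x_fdma_deinit(struct lan966x *lan966x)
    {
            if (!lan966x->fdma)
                    return;

            lan966x_fdma_rx_disable(&lan966x->rx);
            lan966x_fdma_tx_disable(&lan966x->tx);

            napi_synchronize(&lan966x->napi);
            napi_disable(&lan966x->napi);

            lan966x_fdma_rx_free_pages(&lan966x->rx);
            fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma);
            page_pool_destroy(lan966x->rx.page_pool);
            lan966x_fdma_tx_free(&lan966x->tx);
    }
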