Lines matching "sparx5-switch-reset" in the Microchip Sparx5 switch driver (sparx5_packet.c)
1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
19 #define XTR_VALID_BYTES(x) (4 - ((x) & 3))
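The status word's low two bits tell the extraction code how many bytes of the last 32-bit data word to keep, so the macro yields 4, 3, 2 or 1. A minimal userspace check of that arithmetic (scaffolding only; the macro itself is copied from line 19):

#include <stdio.h>

#define XTR_VALID_BYTES(x) (4 - ((x) & 3))

int main(void)
{
        /* Map each possible 2-bit status value to the number of
         * valid bytes in the final extraction word: 0->4, 1->3, 2->2, 3->1. */
        for (int x = 0; x < 4; x++)
                printf("status LSBs %d -> %d valid bytes\n", x, XTR_VALID_BYTES(x));
        return 0;
}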
23 void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
26 spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);
32 spx5_wr(0, sparx5, QS_XTR_FLUSH);
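Lines 26 and 32 bracket the flush: the per-group flush bit is pulsed, the queue is allowed to drain, and the register is returned to zero. A condensed view of the sequence (the drain delay between the two writes is an assumption; only the two writes appear in the matches above):

/* Sketch of the flush sequence in sparx5_xtr_flush(). */
spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH); /* start flush */
mdelay(1);                                /* assumed: let the group drain */
spx5_wr(0, sparx5, QS_XTR_FLUSH);         /* back to normal operation */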
39 /* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
46 info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
48 info->timestamp =
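FIELD_GET(GENMASK(7, 1), fwd) extracts the source port from bits 7:1 of the assembled FWD word. The stand-ins below reproduce what those kernel helpers do for a 32-bit value, as a runnable userspace sketch (the example fwd value is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Simplified 32-bit stand-ins for the kernel's GENMASK()/FIELD_GET(). */
#define GENMASK(h, l)      ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & -(mask)))

int main(void)
{
        uint32_t fwd = 0x5a; /* example only: 0101 1010 */
        uint32_t src_port = FIELD_GET(GENMASK(7, 1), fwd);

        /* bits 7:1 of 0x5a are 0101101 = 45 */
        printf("src_port = %u\n", src_port);
        return 0;
}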
55 static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
68 ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
75 sparx5->ports[fi.src_port] : NULL;
76 if (!port || !port->ndev) {
77 dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
78 sparx5_xtr_flush(sparx5, grp);
83 netdev = port->ndev;
84 skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
86 sparx5_xtr_flush(sparx5, grp);
87 dev_err(sparx5->dev, "No skb allocated\n");
88 netdev->stats.rx_dropped++;
91 rxbuf = (u32 *)skb->data;
95 u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
101 switch (cmp) {
118 byte_cnt -= (4 - XTR_VALID_BYTES(val));
127 *rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
142 netdev->stats.rx_dropped++;
149 if (test_bit(port->portno, sparx5->bridge_mask))
150 skb->offload_fwd_mark = 1;
153 skb_put(skb, byte_cnt - ETH_FCS_LEN);
155 sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
156 skb->protocol = eth_type_trans(skb, netdev);
157 netdev->stats.rx_bytes += skb->len;
158 netdev->stats.rx_packets++;
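Each data word read from the FIFO adds 4 to byte_cnt (lines 95-127); the EOF status word then subtracts the stale bytes of the last word (line 118), and skb_put() drops the trailing FCS (line 153). The accounting for a 67-byte frame, as a self-contained check (the frame size is an example):

#include <stdio.h>

#define XTR_VALID_BYTES(x) (4 - ((x) & 3))
#define ETH_FCS_LEN 4

int main(void)
{
        int byte_cnt = 17 * 4; /* 17 data words copied, 68 bytes so far */
        int status = 1;        /* EOF status: 3 of the last 4 bytes valid */

        byte_cnt -= 4 - XTR_VALID_BYTES(status); /* 68 - 1 = 67 on the wire */

        /* Length handed to the stack, FCS stripped as on line 153. */
        printf("skb length = %d bytes\n", byte_cnt - ETH_FCS_LEN); /* 63 */
        return 0;
}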
162 static int sparx5_inject(struct sparx5 *sparx5,
171 val = spx5_rd(sparx5, QS_INJ_STATUS);
175 return -EBUSY;
181 sparx5, QS_INJ_CTRL(grp));
185 spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));
188 count = DIV_ROUND_UP(skb->len, 4);
189 buf = skb->data;
192 spx5_wr(val, sparx5, QS_INJ_WR(grp));
197 spx5_wr(0, sparx5, QS_INJ_WR(grp));
203 QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
205 sparx5, QS_INJ_CTRL(grp));
208 spx5_wr(0, sparx5, QS_INJ_WR(grp));
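Injection works in whole 32-bit words: DIV_ROUND_UP(skb->len, 4) data words, zero padding up to the 60-byte minimum frame, then an EOF control word whose VLD_BYTES field records how many bytes of the last word are real (0 when the length is below 60 or a multiple of 4). The arithmetic, runnable in isolation (values are examples):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void inject_math(unsigned int len)
{
        unsigned int words  = DIV_ROUND_UP(len, 4);
        unsigned int padded = words < 60 / 4 ? 60 / 4 : words;
        unsigned int vld    = len < 60 ? 0 : len % 4;

        printf("len=%u: data words=%u, after padding=%u, VLD_BYTES=%u\n",
               len, words, padded, vld);
}

int main(void)
{
        inject_math(42); /* short frame: padded to 15 words, VLD_BYTES=0 */
        inject_math(61); /* 16 words, 1 byte of the last word is valid */
        inject_math(64); /* exact multiple of 4: VLD_BYTES=0 */
        return 0;
}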
211 val = spx5_rd(sparx5, QS_INJ_STATUS);
218 hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
227 struct net_device_stats *stats = &dev->stats;
229 struct sparx5 *sparx5 = port->sparx5;
234 sparx5_set_port_ifh(ifh, port->portno);
236 if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
240 sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
241 sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
242 sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
243 sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
247 spin_lock(&sparx5->tx_lock);
248 if (sparx5->fdma_irq > 0)
249 ret = sparx5_fdma_xmit(sparx5, ifh, skb);
251 ret = sparx5_inject(sparx5, ifh, skb, dev);
252 spin_unlock(&sparx5->tx_lock);
254 if (ret == -EBUSY)
259 stats->tx_bytes += skb->len;
260 stats->tx_packets++;
261 sparx5->tx.packets++;
263 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
264 SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
270 stats->tx_dropped++;
271 sparx5->tx.dropped++;
275 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
276 SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
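sparx5_port_xmit_impl() has the shape of an ndo_start_xmit handler: it takes the skb and net_device, returns NETDEV_TX_OK or NETDEV_TX_BUSY, and frees or holds the skb accordingly. A hedged sketch of how such a function is normally published, not the driver's actual ops table:

/* Illustrative only: the real sparx5 net_device_ops lives elsewhere. */
static const struct net_device_ops sparx5_example_netdev_ops = {
        .ndo_start_xmit = sparx5_port_xmit_impl,
        /* .ndo_open, .ndo_stop, ... */
};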
288 val = spx5_rd(port->sparx5, QS_INJ_STATUS);
290 pr_err_ratelimited("Injection: Reset watermark count\n");
291 /* Reset Watermark count to restart */
294 port->sparx5,
295 DSM_DEV_TX_STOP_WM_CFG(port->portno));
297 netif_wake_queue(port->ndev);
301 int sparx5_manual_injection_mode(struct sparx5 *sparx5)
310 sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
313 sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
321 sparx5, ASM_PORT_CFG(portno));
323 /* Reset WM cnt to unclog queued frames */
326 sparx5,
332 sparx5,
339 sparx5,
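The spx5_rmw() calls above touch only one field of each register: read the current value, clear the masked bits, merge the new field, write back. A generic userspace illustration of that read-modify-write idiom (assuming the usual value/mask argument order; this is not the driver's accessor):

#include <stdio.h>
#include <stdint.h>

/* Only the bits selected by 'mask' change. */
static uint32_t rmw(uint32_t reg, uint32_t val, uint32_t mask)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t reg = 0xffff0000; /* pretend register image */

        /* Set a one-bit "clear WM counter" style field, leaving the
         * rest of the register untouched. */
        reg = rmw(reg, 1u << 3, 1u << 3);
        printf("reg = 0x%08x\n", reg); /* 0xffff0008 */
        return 0;
}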
347 struct sparx5 *s5 = _sparx5;
351 while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
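The handler drains frames only while the poll counter stays positive, so a burst of traffic cannot pin the CPU inside the interrupt. Registration would follow the standard request_irq() pattern; in this sketch the irq field name and the name string are assumptions, not quotes from the driver:

err = devm_request_irq(sparx5->dev, sparx5->xtr_irq, /* field name assumed */
                       sparx5_xtr_handler, 0,
                       "sparx5-xtr", sparx5);        /* name string assumed */
if (err)
        dev_err(sparx5->dev, "Failed to request extraction irq\n");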
359 hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
360 port->inj_timer.function = sparx5_injection_timeout;
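This pairs with the hrtimer_start() on line 218: the timer is initialised once per port, armed when the injection watermark trips, and the callback (lines 288-297) runs once, clears the watermark counter if it is still set, wakes the queue and does not re-arm. A condensed sketch of that one-shot callback shape:

/* One-shot pattern used by the injection timeout (sketch). */
static enum hrtimer_restart example_inj_timeout(struct hrtimer *tmr)
{
        struct sparx5_port *port = container_of(tmr, struct sparx5_port,
                                                inj_timer);

        netif_wake_queue(port->ndev); /* let the stack transmit again */
        return HRTIMER_NORESTART;     /* do not re-arm */
}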