// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/dma-mapping.h>
#include <linux/ip.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <uapi/linux/udp.h>
#include "funeth.h"
#include "funeth_ktls.h"
#include "funeth_txrx.h"
#include "funeth_trace.h"
#include "fun_queue.h"

#define FUN_XDP_CLEAN_THRES 32
#define FUN_XDP_CLEAN_BATCH 16
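/* fun_xdp_tx() reclaims up to FUN_XDP_CLEAN_BATCH completed descriptors from
 * an XDP Tx queue whenever its free space drops below FUN_XDP_CLEAN_THRES.
 */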

/* DMA-map a packet and return the (length, DMA_address) pairs for its
 * segments. If a mapping error occurs, -ENOMEM is returned.
 */
static int map_skb(const struct sk_buff *skb, struct device *dev,
		   dma_addr_t *addr, unsigned int *len)
{
	const struct skb_shared_info *si;
	const skb_frag_t *fp, *end;

	*len = skb_headlen(skb);
	*addr = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++len = skb_frag_size(fp);
		*++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
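	/* Undo the frag mappings made so far, then the linear mapping. */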
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}

/* Return the address just past the end of a Tx queue's descriptor ring.
 * It exploits the fact that the HW writeback area is just after the end
 * of the descriptor ring.
 */
static void *txq_end(const struct funeth_txq *q)
{
	return (void *)q->hw_wb;
}

/* Return the amount of space within a Tx ring from the given address to the
 * end.
 */
static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
{
	return txq_end(q) - p;
}

/* Return the number of Tx descriptors occupied by a Tx request. */
static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
{
	return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
}

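/* Return the 16-bit word of the TCP header holding the data offset and flags,
 * in network byte order, for use in the LSO offload descriptor.
 */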
static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
{
	return *(__be16 *)&tcp_flag_word(th);
}

static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
				  unsigned int *tls_len)
{
#if IS_ENABLED(CONFIG_TLS_DEVICE)
	const struct fun_ktls_tx_ctx *tls_ctx;
	u32 datalen, seq;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		return skb;

	if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
		seq = ntohl(tcp_hdr(skb)->seq);
		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);

		if (likely(tls_ctx->next_seq == seq)) {
			*tls_len = datalen;
			return skb;
		}
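		/* The skb is out of sync with the driver's record state. If it
		 * is ahead of the expected sequence, i.e., not a retransmit,
		 * ask the stack to resync; either way fall back to software
		 * encryption for this skb.
		 */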
		if (seq - tls_ctx->next_seq < U32_MAX / 4) {
			tls_offload_tx_resync_request(skb->sk, seq,
						      tls_ctx->next_seq);
		}
	}

	FUN_QSTAT_INC(q, tx_tls_fallback);
	skb = tls_encrypt_skb(skb);
	if (!skb)
		FUN_QSTAT_INC(q, tx_tls_drops);

	return skb;
#else
	return NULL;
#endif
}

/* Write as many descriptors as needed for the supplied skb starting at the
 * current producer location. The caller has made certain enough descriptors
 * are available.
 *
 * Returns the number of descriptors written, 0 on error.
 */
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
				   unsigned int tls_len)
{
	unsigned int extra_bytes = 0, extra_pkts = 0;
	unsigned int idx = q->prod_cnt & q->mask;
	const struct skb_shared_info *shinfo;
	unsigned int lens[MAX_SKB_FRAGS + 1];
	dma_addr_t addrs[MAX_SKB_FRAGS + 1];
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	const struct tcphdr *th;
	unsigned int ngle, i;
	u16 flags;

	if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return 0;
	}

	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = 0;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;

	shinfo = skb_shinfo(skb);
	if (likely(shinfo->gso_size)) {
		if (skb->encapsulation) {
			u16 ol4_ofst;

			flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_OUTER_L3_LEN;
			if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
						SKB_GSO_UDP_TUNNEL_CSUM)) {
				flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
					 FUN_ETH_OUTER_UDP;
				if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
					flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
				ol4_ofst = skb_transport_offset(skb);
			} else {
				ol4_ofst = skb_inner_network_offset(skb);
			}

			if (ip_hdr(skb)->version == 4)
				flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
			else
				flags |= FUN_ETH_OUTER_IPV6;

			if (skb->inner_network_header) {
				if (inner_ip_hdr(skb)->version == 4)
					flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
				else
					flags |= FUN_ETH_INNER_IPV6 |
						 FUN_ETH_UPDATE_INNER_L3_LEN;
			}
			th = inner_tcp_hdr(skb);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_inner_network_offset(skb),
					     skb_inner_transport_offset(skb),
					     skb_network_offset(skb), ol4_ofst);
			FUN_QSTAT_INC(q, tx_encap_tso);
		} else {
			/* HW considers one set of headers as inner */
			flags = FUN_ETH_INNER_LSO |
				FUN_ETH_UPDATE_INNER_L4_CKSUM |
				FUN_ETH_UPDATE_INNER_L3_LEN;
			if (shinfo->gso_type & SKB_GSO_TCPV6)
				flags |= FUN_ETH_INNER_IPV6;
			else
				flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
			th = tcp_hdr(skb);
			fun_eth_offload_init(&req->offload, flags,
					     shinfo->gso_size,
					     tcp_hdr_doff_flags(th), 0,
					     skb_network_offset(skb),
					     skb_transport_offset(skb), 0, 0);
			FUN_QSTAT_INC(q, tx_tso);
		}

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_cso += shinfo->gso_segs;
		u64_stats_update_end(&q->syncp);

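		/* The device replicates the headers (through the inner TCP
		 * header) for each additional TSO segment; include those
		 * bytes in the Tx byte count.
		 */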
		extra_pkts = shinfo->gso_segs - 1;
		extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
			       __tcp_hdrlen(th)) * extra_pkts;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
		if (skb->csum_offset == offsetof(struct udphdr, check))
			flags |= FUN_ETH_INNER_UDP;
		fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
				     skb_checksum_start_offset(skb), 0, 0);
		FUN_QSTAT_INC(q, tx_cso);
	} else {
		fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	}

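	/* One gather entry covers the linear part and one covers each page
	 * fragment; the request length is expressed in 8-byte units.
	 */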
	ngle = shinfo->nr_frags + 1;
	req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
	req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);

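	/* Write the gather list immediately after the request header. Entries
	 * that would run past the end of the descriptor ring wrap around to
	 * the start of the ring.
	 */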
	for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
	     i < ngle && txq_to_end(q, gle); i++, gle++)
		fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);

	if (txq_to_end(q, gle) == 0) {
		gle = (struct fun_dataop_gl *)q->desc;
		for ( ; i < ngle; i++, gle++)
			fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
	}

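	/* For inline TLS, append a metadata descriptor carrying the
	 * connection's HW TLS id and advance the driver's record of the next
	 * expected TCP sequence number.
	 */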
	if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
		struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
		struct fun_ktls_tx_ctx *tls_ctx;

		req->len8 += FUNETH_TLS_SZ / 8;
		req->flags = cpu_to_be16(FUN_ETH_TX_TLS);

		tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
		tls->tlsid = tls_ctx->tlsid;
		tls_ctx->next_seq += tls_len;

		u64_stats_update_begin(&q->syncp);
		q->stats.tx_tls_bytes += tls_len;
		q->stats.tx_tls_pkts += 1 + extra_pkts;
		u64_stats_update_end(&q->syncp);
	}

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += skb->len + extra_bytes;
	q->stats.tx_pkts += 1 + extra_pkts;
	u64_stats_update_end(&q->syncp);

	q->info[idx].skb = skb;

	trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
	return tx_req_ndesc(req);
}

/* Return the number of available descriptors of a Tx queue.
 * HW assumes head==tail means the ring is empty so we need to keep one
 * descriptor unused.
 */
static unsigned int fun_txq_avail(const struct funeth_txq *q)
{
	return q->mask - q->prod_cnt + q->cons_cnt;
}

/* Stop a queue if it can't handle another worst-case packet. */
static void fun_tx_check_stop(struct funeth_txq *q)
{
	if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
		return;

	netif_tx_stop_queue(q->ndq);

	/* NAPI reclaim is freeing packets in parallel with us and we may race.
	 * We have stopped the queue but check again after synchronizing with
	 * reclaim.
	 */
	smp_mb();
	if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
		FUN_QSTAT_INC(q, tx_nstops);
	else
		netif_tx_start_queue(q->ndq);
}

/* Return true if a queue has enough space to restart. Current condition is
 * that the queue must be >= 1/4 empty.
 */
static bool fun_txq_may_restart(struct funeth_txq *q)
{
	return fun_txq_avail(q) >= q->mask / 4;
}

netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int qid = skb_get_queue_mapping(skb);
	struct funeth_txq *q = fp->txqs[qid];
	unsigned int tls_len = 0;
	unsigned int ndesc;

	if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
	    tls_is_sk_tx_device_offloaded(skb->sk)) {
		skb = fun_tls_tx(skb, q, &tls_len);
		if (unlikely(!skb))
			goto dropped;
	}

	ndesc = write_pkt_desc(skb, q, tls_len);
	if (unlikely(!ndesc)) {
		dev_kfree_skb_any(skb);
		goto dropped;
	}

	q->prod_cnt += ndesc;
	fun_tx_check_stop(q);

	skb_tx_timestamp(skb);

	if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
		fun_txq_wr_db(q);
	else
		FUN_QSTAT_INC(q, tx_more);

	return NETDEV_TX_OK;

dropped:
	/* A dropped packet may be the last one in a xmit_more train,
	 * ring the doorbell just in case.
	 */
	if (!netdev_xmit_more())
		fun_txq_wr_db(q);
	return NETDEV_TX_OK;
}

/* Return a Tx queue's HW head index written back to host memory. */
static u16 txq_hw_head(const struct funeth_txq *q)
{
	return (u16)be64_to_cpu(*q->hw_wb);
}

/* Unmap the Tx packet starting at the given descriptor index and
 * return the number of Tx descriptors it occupied.
 */
static unsigned int unmap_skb(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	unsigned int ngle = req->dataop.ngather;
	struct fun_dataop_gl *gle;

	if (ngle) {
		gle = (struct fun_dataop_gl *)req->dataop.imm;
		dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
				 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);

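		/* Unmap the remaining page fragments. Gather entries that did
		 * not fit before the end of the ring wrapped to its start and
		 * are handled by the second loop.
		 */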
		for (gle++; --ngle && txq_to_end(q, gle); gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);

		for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
			dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
				       be32_to_cpu(gle->sgl_len),
				       DMA_TO_DEVICE);
	}

	return tx_req_ndesc(req);
}

/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
 * queue if we freed enough descriptors.
 *
 * Return true if we exhausted the budget while there is more work to be done.
 */
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
{
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;
	unsigned int head, limit, reclaim_idx;

	/* budget may be 0, e.g., netpoll */
	limit = budget ? budget : UINT_MAX;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			unsigned int pkt_desc = unmap_skb(q, reclaim_idx);
			struct sk_buff *skb = q->info[reclaim_idx].skb;

			trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);

			nbytes += skb->len;
			napi_consume_skb(skb, budget);
			ndesc += pkt_desc;
			reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < limit);
	}

	q->cons_cnt += ndesc;
	netdev_tx_completed_queue(q->ndq, npkts, nbytes);
	smp_mb(); /* pairs with the one in fun_tx_check_stop() */

	if (unlikely(netif_tx_queue_stopped(q->ndq) &&
		     fun_txq_may_restart(q))) {
		netif_tx_wake_queue(q->ndq);
		FUN_QSTAT_INC(q, tx_nrestarts);
	}

	return reclaim_idx != head;
}

/* The NAPI handler for Tx queues. */
int fun_txq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_txq *q = irq->txq;
	unsigned int db_val;

	if (fun_txq_reclaim(q, budget))
		return budget;               /* exhausted budget */

	napi_complete(napi);                 /* exhausted pending work */
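	/* Re-arm the queue's interrupt, reporting the current consumer index
	 * in the doorbell write.
	 */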
	db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
	writel(db_val, q->db);
	return 0;
}

static void fun_xdp_unmap(const struct funeth_txq *q, unsigned int idx)
{
	const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
	const struct fun_dataop_gl *gle;

	gle = (const struct fun_dataop_gl *)req->dataop.imm;
	dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
			 be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
}

/* Reclaim up to @budget completed Tx descriptors from a TX XDP queue. */
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
{
	unsigned int npkts = 0, head, reclaim_idx;

	for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
	     head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
		/* The HW head is continually updated, ensure we don't read
		 * descriptor state before the head tells us to reclaim it.
		 * On the enqueue side the doorbell is an implicit write
		 * barrier.
		 */
		rmb();

		do {
			fun_xdp_unmap(q, reclaim_idx);
			page_frag_free(q->info[reclaim_idx].vaddr);

			trace_funeth_tx_free(q, reclaim_idx, 1, head);

			reclaim_idx = (reclaim_idx + 1) & q->mask;
			npkts++;
		} while (reclaim_idx != head && npkts < budget);
	}

	q->cons_cnt += npkts;
	return npkts;
}

bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
{
	struct fun_eth_tx_req *req;
	struct fun_dataop_gl *gle;
	unsigned int idx;
	dma_addr_t dma;

	if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
		fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);

	if (unlikely(!fun_txq_avail(q))) {
		FUN_QSTAT_INC(q, tx_xdp_full);
		return false;
	}

	dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
		FUN_QSTAT_INC(q, tx_map_err);
		return false;
	}

	idx = q->prod_cnt & q->mask;
	req = fun_tx_desc_addr(q, idx);
	req->op = FUN_ETH_OP_TX;
	req->len8 = (sizeof(*req) + sizeof(*gle)) / 8;
	req->flags = 0;
	req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
	req->repr_idn = 0;
	req->encap_proto = 0;
	fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
	req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);

	gle = (struct fun_dataop_gl *)req->dataop.imm;
	fun_dataop_gl_init(gle, 0, 0, len, dma);

	q->info[idx].vaddr = data;

	u64_stats_update_begin(&q->syncp);
	q->stats.tx_bytes += len;
	q->stats.tx_pkts++;
	u64_stats_update_end(&q->syncp);

	trace_funeth_tx(q, len, idx, 1);
	q->prod_cnt++;

	return true;
}

int fun_xdp_xmit_frames(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q, **xdpqs;
	int i, q_idx;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (unlikely(!xdpqs))
		return -ENETDOWN;

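	/* XDP Tx queues are per-CPU; transmit on the current CPU's queue. */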
	q_idx = smp_processor_id();
	if (unlikely(q_idx >= fp->num_xdpqs))
		return -ENXIO;

	for (q = xdpqs[q_idx], i = 0; i < n; i++) {
		const struct xdp_frame *xdpf = frames[i];

		if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
			break;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		fun_txq_wr_db(q);
	return i;
}

/* Purge a Tx queue of any queued packets. Should be called once HW access
 * to the packets has been revoked, e.g., after the queue has been disabled.
 */
static void fun_txq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		q->cons_cnt += unmap_skb(q, idx);
		dev_kfree_skb_any(q->info[idx].skb);
	}
	netdev_tx_reset_queue(q->ndq);
}

static void fun_xdpq_purge(struct funeth_txq *q)
{
	while (q->cons_cnt != q->prod_cnt) {
		unsigned int idx = q->cons_cnt & q->mask;

		fun_xdp_unmap(q, idx);
		page_frag_free(q->info[idx].vaddr);
		q->cons_cnt++;
	}
}

/* Create a Tx queue, allocating all the host resources needed. */
static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ndesc,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_txq *q;
	int numa_node;

	if (irq)
		numa_node = fun_irq_node(irq); /* skb Tx queue */
	else
		numa_node = cpu_to_node(qidx); /* XDP Tx queue */

	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->dma_dev = &fp->pdev->dev;
	q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
				     sizeof(*q->info), true, numa_node,
				     &q->dma_addr, (void **)&q->info,
				     &q->hw_wb);
	if (!q->desc)
		goto free_q;

	q->netdev = dev;
	q->mask = ndesc - 1;
	q->qidx = qidx;
	q->numa_node = numa_node;
	u64_stats_init(&q->syncp);
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_q:
	kfree(q);
err:
	netdev_err(dev, "Can't allocate memory for %s queue %u\n",
		   irq ? "Tx" : "XDP", qidx);
	return NULL;
}

static void fun_txq_free_sw(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
			  q->desc, q->dma_addr, q->info);

	fp->tx_packets += q->stats.tx_pkts;
	fp->tx_bytes   += q->stats.tx_bytes;
	fp->tx_dropped += q->stats.tx_map_err;

	kfree(q);
}

/* Allocate the device portion of a Tx queue. */
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int irq_idx, ndesc = q->mask + 1;
	int err;

	q->irq = irq;
	*q->hw_wb = 0;
	q->prod_cnt = 0;
	q->cons_cnt = 0;
	irq_idx = irq ? irq->irq_idx : 0;

	err = fun_sq_create(fp->fdev,
			    FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
			    FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
			    FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
			    q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
			    irq_idx, 0, fp->fdev->kern_end_qid, 0,
			    &q->hw_qid, &q->db);
	if (err)
		goto out;

	err = fun_create_and_bind_tx(fp, q->hw_qid);
	if (err < 0)
		goto free_devq;
	q->ethid = err;

	if (irq) {
		irq->txq = q;
		q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
		q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
					      fp->tx_coal_count);
		writel(q->irq_db_val, q->db);
	}

	q->init_state = FUN_QSTATE_INIT_FULL;
	netif_info(fp, ifup, q->netdev,
		   "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
		   irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
		   q->ethid, q->numa_node);
	return 0;

free_devq:
	fun_destroy_sq(fp->fdev, q->hw_qid);
out:
	netdev_err(q->netdev,
		   "Failed to create %s queue %u on device, error %d\n",
		   irq ? "Tx" : "XDP", q->qidx, err);
	return err;
}

static void fun_txq_free_dev(struct funeth_txq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	netif_info(fp, ifdown, q->netdev,
		   "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
		   q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
		   q->irq ? q->irq->irq_idx : 0, q->ethid);

	fun_destroy_sq(fp->fdev, q->hw_qid);
	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);

	if (q->irq) {
		q->irq->txq = NULL;
		fun_txq_purge(q);
	} else {
		fun_xdpq_purge(q);
	}

	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance a Tx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_txq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ndesc, struct fun_irq *irq, int state,
		      struct funeth_txq **qp)
{
	struct funeth_txq *q = *qp;
	int err;

	if (!q)
		q = fun_txq_create_sw(dev, qidx, ndesc, irq);
	if (!q)
		return -ENOMEM;

	if (q->init_state >= state)
		goto out;

	err = fun_txq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_txq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Tx queue resources until it reaches the target state.
 * The queue must be already disconnected from the stack.
 */
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_txq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_txq_free_sw(q);
		q = NULL;
	}

	return q;
}