xref: /src/sys/dev/dwc/dwc1000_dma.c (revision 5d8777f3a7aee04eabbc9f3cf12138f9b56e3ebc)
1 /*-
2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3  *
4  * This software was developed by SRI International and the University of
5  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
6  * ("CTSRD"), as part of the DARPA CRASH research programme.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mbuf.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/rman.h>
40 #include <sys/socket.h>
41 
42 #include <net/bpf.h>
43 #include <net/if.h>
44 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_types.h>
48 #include <net/if_var.h>
49 
50 #include <machine/bus.h>
51 
52 #include <dev/clk/clk.h>
53 #include <dev/hwreset/hwreset.h>
54 
55 #include <dev/ofw/ofw_bus.h>
56 #include <dev/ofw/ofw_bus_subr.h>
57 
58 #include <dev/dwc/if_dwcvar.h>
59 #include <dev/dwc/dwc1000_reg.h>
60 #include <dev/dwc/dwc1000_dma.h>
61 
/* Seconds of TX inactivity tolerated before the watchdog fires. */
#define	WATCHDOG_TIMEOUT_SECS	5
/* Max polls (10us apart) waiting for BUS_MODE_SWR to self-clear. */
#define	DMA_RESET_TIMEOUT	100

/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)	/* Owned by DMA engine */
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U <<  9)	/* Late Collision */
#define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
#define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
#define	TDESC0_CC_MASK		0xf
#define	TDESC0_CC_SHIFT		3		/* Collision Count */
#define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
#define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR		(1U << 22)
#define	ETDESC0_CIC_SEG 	(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR		(1U << 27)
#define	NTDESC1_CIC_SEG 	(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)	/* Owned by DMA engine */
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK		0x3fff
#define	RDESC0_FL_SHIFT		16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
#define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U <<  6)	/* Late Collision */
#define	RDESC0_FT		(1U <<  5)	/* Frame Type */
#define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U <<  3)	/* Receive Error */
#define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U <<  1)	/* CRC Error */
#define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
 */
#define	DWC_DESC_RING_ALIGN	2048
159 
160 static inline uint32_t
next_txidx(struct dwc_softc * sc,uint32_t curidx)161 next_txidx(struct dwc_softc *sc, uint32_t curidx)
162 {
163 
164 	return ((curidx + 1) % TX_DESC_COUNT);
165 }
166 
167 static inline uint32_t
next_rxidx(struct dwc_softc * sc,uint32_t curidx)168 next_rxidx(struct dwc_softc *sc, uint32_t curidx)
169 {
170 
171 	return ((curidx + 1) % RX_DESC_COUNT);
172 }
173 
174 static void
dwc_get1paddr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)175 dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
176 {
177 
178 	if (error != 0)
179 		return;
180 	*(bus_addr_t *)arg = segs[0].ds_addr;
181 }
182 
183 inline static void
txdesc_clear(struct dwc_softc * sc,int idx)184 txdesc_clear(struct dwc_softc *sc, int idx)
185 {
186 
187 	sc->tx_desccount--;
188 	sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
189 	sc->txdesc_ring[idx].desc0 = 0;
190 	sc->txdesc_ring[idx].desc1 = 0;
191 }
192 
/*
 * Fill in TX descriptor idx for one DMA segment of len bytes at paddr,
 * then hand the descriptor to the hardware.  The control-bit layout
 * differs between the normal and extended descriptor formats, selected
 * by sc->dma_ext_desc.  flags carries the checksum-insertion bits and
 * is expected to be in the format matching sc->dma_ext_desc.
 */
inline static void
txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr,
  uint32_t len, uint32_t flags, bool first, bool last)
{
	uint32_t desc0, desc1;

	if (!sc->dma_ext_desc) {
		/* Normal format: all control bits live in desc1. */
		desc0 = 0;
		desc1 = NTDESC1_TCH | len | flags;
		if (first)
			desc1 |=  NTDESC1_FS;
		if (last)
			desc1 |= NTDESC1_LS | NTDESC1_IC;
	} else {
		/* Extended format: control bits in desc0, length in desc1. */
		desc0 = ETDESC0_TCH | flags;
		if (first)
			desc0 |= ETDESC0_FS;
		if (last)
			desc0 |= ETDESC0_LS | ETDESC0_IC;
		desc1 = len;
	}
	++sc->tx_desccount;
	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
	/*
	 * The descriptor body must be globally visible before the OWN bit
	 * transfers ownership to the DMA engine, hence the barrier.
	 */
	wmb();
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
	wmb();
}
222 
/*
 * Point RX descriptor idx at the receive buffer at paddr and hand it to
 * the hardware.  Descriptors are chained (RCH): addr2 holds the physical
 * address of the next descriptor in the ring.  Returns the index of that
 * next descriptor.
 */
inline static uint32_t
rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	/* The buffer-1 size field width differs between descriptor formats. */
	if (!sc->dma_ext_desc)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	/* Publish the descriptor body before granting ownership to the DMA. */
	wmb();
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
	wmb();
	return (nidx);
}
244 
245 int
dma1000_setup_txbuf(struct dwc_softc * sc,int idx,struct mbuf ** mp)246 dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
247 {
248 	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
249 	int error, nsegs;
250 	struct mbuf * m;
251 	uint32_t flags;
252 	int i;
253 	int last;
254 
255 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
256 	    *mp, segs, &nsegs, 0);
257 	if (error == EFBIG) {
258 		/*
259 		 * The map may be partially mapped from the first call.
260 		 * Make sure to reset it.
261 		 */
262 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
263 		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
264 			return (ENOMEM);
265 		*mp = m;
266 		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
267 		    *mp, segs, &nsegs, 0);
268 	}
269 	if (error != 0)
270 		return (ENOMEM);
271 
272 	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
273 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
274 		return (ENOMEM);
275 	}
276 
277 	m = *mp;
278 
279 	if ((m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) != 0)
280 		flags = sc->dma_ext_desc ? ETDESC0_CIC_SEG : NTDESC1_CIC_SEG;
281 	else if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
282 		flags = sc->dma_ext_desc ? ETDESC0_CIC_HDR : NTDESC1_CIC_HDR;
283 	else
284 		flags = sc->dma_ext_desc ? ETDESC0_CIC_NONE : NTDESC1_CIC_NONE;
285 
286 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
287 	    BUS_DMASYNC_PREWRITE);
288 
289 	sc->txbuf_map[idx].mbuf = m;
290 
291 	for (i = 0; i < nsegs; i++) {
292 		txdesc_setup(sc, sc->tx_desc_head,
293 		    segs[i].ds_addr, segs[i].ds_len,
294 		    (i == 0) ? flags : 0, /* only first desc needs flags */
295 		    (i == 0),
296 		    (i == nsegs - 1));
297 		last = sc->tx_desc_head;
298 		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
299 	}
300 
301 	sc->txbuf_map[idx].last_desc_idx = last;
302 
303 	return (0);
304 }
305 
/*
 * Map mbuf m for receive DMA and install it in the RX ring at idx.
 * Returns 0 on success or the bus_dmamap_load_mbuf_sg() error; on
 * failure the caller retains ownership of m.
 */
static int
dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	/* Shift the payload so the IP header ends up 32-bit aligned. */
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	/* A single cluster must always map to exactly one segment. */
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	rxdesc_setup(sc, idx, seg.ds_addr);

	return (0);
}
329 
330 static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc * sc)331 dwc_alloc_mbufcl(struct dwc_softc *sc)
332 {
333 	struct mbuf *m;
334 
335 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
336 	if (m != NULL)
337 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
338 
339 	return (m);
340 }
341 
342 static struct mbuf *
dwc_rxfinish_one(struct dwc_softc * sc,struct dwc_hwdesc * desc,struct dwc_bufmap * map)343 dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
344     struct dwc_bufmap *map)
345 {
346 	if_t ifp;
347 	struct mbuf *m, *m0;
348 	int len;
349 	uint32_t rdesc0;
350 
351 	m = map->mbuf;
352 	ifp = sc->ifp;
353 	rdesc0 = desc ->desc0;
354 
355 	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
356 		    (RDESC0_FS | RDESC0_LS)) {
357 		/*
358 		 * Something very wrong happens. The whole packet should be
359 		 * received in one descriptor. Report problem.
360 		 */
361 		device_printf(sc->dev,
362 		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
363 		    __func__, rdesc0);
364 		return (NULL);
365 	}
366 
367 	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
368 	if (len < 64) {
369 		/*
370 		 * Lenght is invalid, recycle old mbuf
371 		 * Probably impossible case
372 		 */
373 		return (NULL);
374 	}
375 
376 	/* Allocate new buffer */
377 	m0 = dwc_alloc_mbufcl(sc);
378 	if (m0 == NULL) {
379 		/* no new mbuf available, recycle old */
380 		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
381 		return (NULL);
382 	}
383 	/* Do dmasync for newly received packet */
384 	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
385 	bus_dmamap_unload(sc->rxbuf_tag, map->map);
386 
387 	/* Received packet is valid, process it */
388 	m->m_pkthdr.rcvif = ifp;
389 	m->m_pkthdr.len = len;
390 	m->m_len = len;
391 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
392 
393 	if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0 &&
394 	    (rdesc0 & RDESC0_FT) != 0) {
395 		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
396 		if ((rdesc0 & RDESC0_ICE) == 0)
397 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
398 		if ((rdesc0 & RDESC0_PCE) == 0) {
399 			m->m_pkthdr.csum_flags |=
400 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
401 			m->m_pkthdr.csum_data = 0xffff;
402 		}
403 	}
404 
405 	/* Remove trailing FCS */
406 	m_adj(m, -ETHER_CRC_LEN);
407 
408 	DWC_UNLOCK(sc);
409 	if_input(ifp, m);
410 	DWC_LOCK(sc);
411 	return (m0);
412 }
413 
/*
 * Reclaim completed transmissions.  For each pending mbuf map (between
 * tx_map_tail and tx_map_head), check whether the hardware has released
 * every descriptor belonging to the map; if so, unload the map, free the
 * mbuf and clear the descriptors.  Stops at the first map whose
 * descriptors are still owned by the DMA engine.
 */
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		/* One past the last descriptor used by this map. */
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				/* Hardware still owns this descriptor. */
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		/* Give the map's descriptors back to the free pool. */
		while (sc->tx_desc_tail != last_idx) {
			txdesc_clear(sc, sc->tx_desc_tail);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		/* Descriptors were freed, so transmission may resume. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}
463 
/*
 * Drain the interface send queue into the TX ring.  Stops (raising
 * IFF_DRV_OACTIVE) when descriptors or mbuf maps run low or a packet
 * fails to map.  If anything was enqueued, pokes the DMA engine and
 * arms the TX watchdog.
 */
void
dma1000_txstart(struct dwc_softc *sc)
{
	int enqueued;
	struct mbuf *m;

	enqueued = 0;

	for (;;) {
		/* Keep room for one maximally fragmented packet. */
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS  + 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/* Out of mbuf maps; try again after txfinish reclaims some. */
		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(sc->ifp);
		if (m == NULL)
			break;
		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			/* Mapping failed; requeue the packet and stop. */
			if_sendq_prepend(sc->ifp, m);
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(sc->ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		/* Ask the DMA engine to re-poll the TX descriptor list. */
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}
502 
/*
 * Process received frames.  Walks the RX ring from rx_idx until a
 * descriptor still owned by the hardware is found.  Each completed
 * descriptor is either refilled with a fresh mbuf, or, if the frame was
 * bad or no replacement mbuf is available, handed back to the hardware
 * with its old buffer so the ring never contains a hole.
 */
void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			/* Recycle: give the old buffer back to the hardware. */
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create hole in RX ring */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed:  error %d\n",
				    error);

		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}
533 
/*
 * Start the DMA controller: program the operation mode (store-and-forward
 * TX, cut-through RX at a 32-byte threshold), unmask interrupts, then set
 * the start-TX/start-RX bits.
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}
558 
/*
 * Stop the DMA controller: halt TX, flush the TX FIFO, then halt RX.
 * TX is stopped before the flush so no new frames enter the FIFO.
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}
584 
585 int
dma1000_reset(struct dwc_softc * sc)586 dma1000_reset(struct dwc_softc *sc)
587 {
588 	uint32_t reg;
589 	int i;
590 
591 	reg = READ4(sc, BUS_MODE);
592 	reg |= (BUS_MODE_SWR);
593 	WRITE4(sc, BUS_MODE, reg);
594 
595 	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
596 		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
597 			break;
598 		DELAY(10);
599 	}
600 	if (i >= DMA_RESET_TIMEOUT) {
601 		return (ENXIO);
602 	}
603 
604 	return (0);
605 }
606 
607 /*
608  * Create the bus_dma resources
609  */
610 int
dma1000_init(struct dwc_softc * sc)611 dma1000_init(struct dwc_softc *sc)
612 {
613 	struct mbuf *m;
614 	uint32_t reg;
615 	int error;
616 	int nidx;
617 	int idx;
618 
619 	reg = BUS_MODE_USP;
620 	if (!sc->nopblx8)
621 		reg |= BUS_MODE_EIGHTXPBL;
622 	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
623 	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
624 	if (sc->fixed_burst)
625 		reg |= BUS_MODE_FIXEDBURST;
626 	if (sc->mixed_burst)
627 		reg |= BUS_MODE_MIXEDBURST;
628 	if (sc->aal)
629 		reg |= BUS_MODE_AAL;
630 
631 	WRITE4(sc, BUS_MODE, reg);
632 
633 	reg = READ4(sc, HW_FEATURE);
634 	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
635 		sc->dma_ext_desc = true;
636 
637 	/*
638 	 * DMA must be stop while changing descriptor list addresses.
639 	 */
640 	reg = READ4(sc, OPERATION_MODE);
641 	reg &= ~(MODE_ST | MODE_SR);
642 	WRITE4(sc, OPERATION_MODE, reg);
643 
644 	/*
645 	 * Set up TX descriptor ring, descriptors, and dma maps.
646 	 */
647 	error = bus_dma_tag_create(
648 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
649 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
650 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
651 	    BUS_SPACE_MAXADDR,		/* highaddr */
652 	    NULL, NULL,			/* filter, filterarg */
653 	    TX_DESC_SIZE, 1, 		/* maxsize, nsegments */
654 	    TX_DESC_SIZE,		/* maxsegsize */
655 	    0,				/* flags */
656 	    NULL, NULL,			/* lockfunc, lockarg */
657 	    &sc->txdesc_tag);
658 	if (error != 0) {
659 		device_printf(sc->dev,
660 		    "could not create TX ring DMA tag.\n");
661 		goto out;
662 	}
663 
664 	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
665 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
666 	    &sc->txdesc_map);
667 	if (error != 0) {
668 		device_printf(sc->dev,
669 		    "could not allocate TX descriptor ring.\n");
670 		goto out;
671 	}
672 
673 	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
674 	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
675 	    &sc->txdesc_ring_paddr, 0);
676 	if (error != 0) {
677 		device_printf(sc->dev,
678 		    "could not load TX descriptor ring map.\n");
679 		goto out;
680 	}
681 
682 	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
683 		nidx = next_txidx(sc, idx);
684 		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
685 		    (nidx * sizeof(struct dwc_hwdesc));
686 	}
687 
688 	error = bus_dma_tag_create(
689 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
690 	    1, 0,			/* alignment, boundary */
691 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
692 	    BUS_SPACE_MAXADDR,		/* highaddr */
693 	    NULL, NULL,			/* filter, filterarg */
694 	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
695 	    TX_MAP_MAX_SEGS,		/* nsegments */
696 	    MCLBYTES,			/* maxsegsize */
697 	    0,				/* flags */
698 	    NULL, NULL,			/* lockfunc, lockarg */
699 	    &sc->txbuf_tag);
700 	if (error != 0) {
701 		device_printf(sc->dev,
702 		    "could not create TX ring DMA tag.\n");
703 		goto out;
704 	}
705 
706 	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
707 		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
708 		    &sc->txbuf_map[idx].map);
709 		if (error != 0) {
710 			device_printf(sc->dev,
711 			    "could not create TX buffer DMA map.\n");
712 			goto out;
713 		}
714 	}
715 
716 	for (idx = 0; idx < TX_DESC_COUNT; idx++)
717 		txdesc_clear(sc, idx);
718 
719 	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
720 
721 	/*
722 	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
723 	 */
724 	error = bus_dma_tag_create(
725 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
726 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
727 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
728 	    BUS_SPACE_MAXADDR,		/* highaddr */
729 	    NULL, NULL,			/* filter, filterarg */
730 	    RX_DESC_SIZE, 1, 		/* maxsize, nsegments */
731 	    RX_DESC_SIZE,		/* maxsegsize */
732 	    0,				/* flags */
733 	    NULL, NULL,			/* lockfunc, lockarg */
734 	    &sc->rxdesc_tag);
735 	if (error != 0) {
736 		device_printf(sc->dev,
737 		    "could not create RX ring DMA tag.\n");
738 		goto out;
739 	}
740 
741 	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
742 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
743 	    &sc->rxdesc_map);
744 	if (error != 0) {
745 		device_printf(sc->dev,
746 		    "could not allocate RX descriptor ring.\n");
747 		goto out;
748 	}
749 
750 	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
751 	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
752 	    &sc->rxdesc_ring_paddr, 0);
753 	if (error != 0) {
754 		device_printf(sc->dev,
755 		    "could not load RX descriptor ring map.\n");
756 		goto out;
757 	}
758 
759 	error = bus_dma_tag_create(
760 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
761 	    1, 0,			/* alignment, boundary */
762 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
763 	    BUS_SPACE_MAXADDR,		/* highaddr */
764 	    NULL, NULL,			/* filter, filterarg */
765 	    MCLBYTES, 1, 		/* maxsize, nsegments */
766 	    MCLBYTES,			/* maxsegsize */
767 	    0,				/* flags */
768 	    NULL, NULL,			/* lockfunc, lockarg */
769 	    &sc->rxbuf_tag);
770 	if (error != 0) {
771 		device_printf(sc->dev,
772 		    "could not create RX buf DMA tag.\n");
773 		goto out;
774 	}
775 
776 	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
777 		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
778 		    &sc->rxbuf_map[idx].map);
779 		if (error != 0) {
780 			device_printf(sc->dev,
781 			    "could not create RX buffer DMA map.\n");
782 			goto out;
783 		}
784 		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
785 			device_printf(sc->dev, "Could not alloc mbuf\n");
786 			error = ENOMEM;
787 			goto out;
788 		}
789 		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
790 			device_printf(sc->dev,
791 			    "could not create new RX buffer.\n");
792 			goto out;
793 		}
794 	}
795 	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
796 
797 out:
798 	if (error != 0)
799 		return (ENXIO);
800 
801 	return (0);
802 }
803 
804 /*
805  * Free the bus_dma resources
806  */
807 void
dma1000_free(struct dwc_softc * sc)808 dma1000_free(struct dwc_softc *sc)
809 {
810 	bus_dmamap_t map;
811 	int idx;
812 
813 	/* Clean up RX DMA resources and free mbufs. */
814 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
815 		if ((map = sc->rxbuf_map[idx].map) != NULL) {
816 			bus_dmamap_unload(sc->rxbuf_tag, map);
817 			bus_dmamap_destroy(sc->rxbuf_tag, map);
818 			m_freem(sc->rxbuf_map[idx].mbuf);
819 		}
820 	}
821 	if (sc->rxbuf_tag != NULL)
822 		bus_dma_tag_destroy(sc->rxbuf_tag);
823 	if (sc->rxdesc_map != NULL) {
824 		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
825 		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
826 		    sc->rxdesc_map);
827 	}
828 	if (sc->rxdesc_tag != NULL)
829 		bus_dma_tag_destroy(sc->rxdesc_tag);
830 
831 	/* Clean up TX DMA resources. */
832 	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
833 		if ((map = sc->txbuf_map[idx].map) != NULL) {
834 			/* TX maps are already unloaded. */
835 			bus_dmamap_destroy(sc->txbuf_tag, map);
836 		}
837 	}
838 	if (sc->txbuf_tag != NULL)
839 		bus_dma_tag_destroy(sc->txbuf_tag);
840 	if (sc->txdesc_map != NULL) {
841 		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
842 		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
843 		    sc->txdesc_map);
844 	}
845 	if (sc->txdesc_tag != NULL)
846 		bus_dma_tag_destroy(sc->txdesc_tag);
847 }
848 
849 /*
850  * Interrupt function
851  */
852 
853 int
dma1000_intr(struct dwc_softc * sc)854 dma1000_intr(struct dwc_softc *sc)
855 {
856 	uint32_t reg;
857 	int rv;
858 
859 	DWC_ASSERT_LOCKED(sc);
860 
861 	rv = 0;
862 	reg = READ4(sc, DMA_STATUS);
863 	if (reg & DMA_STATUS_NIS) {
864 		if (reg & DMA_STATUS_RI)
865 			dma1000_rxfinish_locked(sc);
866 
867 		if (reg & DMA_STATUS_TI) {
868 			dma1000_txfinish_locked(sc);
869 			dma1000_txstart(sc);
870 		}
871 	}
872 
873 	if (reg & DMA_STATUS_AIS) {
874 		if (reg & DMA_STATUS_FBI) {
875 			/* Fatal bus error */
876 			rv = EIO;
877 		}
878 	}
879 
880 	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
881 	return (rv);
882 }
883