/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc32 *)desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}
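
/* A worked example of the address split above, under the usual SSB layout
 * where SSB_DMA_TRANSLATION_MASK covers the top two address bits (mask
 * 0xC0000000, shift 30): a buffer mapped at dmaaddr 0x81234560 yields
 * addrext = 0x2 and addr = 0x01234560 | ring->dev->dma.translation. The
 * core thus addresses a 30-bit offset, while addrext and the translation
 * bits select the backplane window the offset is routed through.
 */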

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}
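
/* Note that the RX status register reports a byte offset into the
 * descriptor table, so dividing by sizeof(struct b43legacy_dmadesc32)
 * (two 32-bit words: control and address) turns it into a slot index.
 * op32_poke_tx() and op32_set_current_rxslot() do the inverse conversion.
 */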

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX",
		       ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
return 0;

	return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			  controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
			     " failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					 dma_addr_t addr,
					 size_t buffersize,
					 bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
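
/* Example for the 30-bit case above: a 2048-byte buffer mapped at
 * 0x3FFFFC00 would end at 0x40000400, past the 1 GiB window a 30-bit
 * engine can address, so the mapping is rejected here and callers retry
 * the allocation with GFP_DMA to obtain a lower buffer.
 */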

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
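
/* The frame_len and cookie fields are zeroed above because dma_rx()
 * polls them: a value of 0 means the device has not yet written the RX
 * header (or, on the status ring, the TX status) into this buffer.
 */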

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			& B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			mmio_base + B43legacy_DMA32_TXCTL,
			B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
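
/* The probe above writes the ADDREXT bits into a TX control register and
 * reads them back: if they stick, the core implements address extension
 * and can reach a full 32-bit address space; otherwise only the 30-bit
 * window is usable and buffers must sit below 1 GiB.
 */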

static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the rx handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			"bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}
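
/* Example: a frame queued on tx_ring2, slot 5 gets cookie 0xC005.
 * parse_cookie() below recovers the ring and the slot from exactly
 * these two bitfields.
 */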

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				      u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}
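
/* Each frame therefore occupies SLOTS_PER_PACKET (two) slots: one
 * descriptor for the cached TX header (FRAMESTART) and one for the skb
 * payload (FRAMEEND plus IRQ). op32_poke_tx() is handed the slot after
 * the payload descriptor, i.e. the first slot the hardware must not yet
 * process.
 */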

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment() might reallocate the skb, so any pointers into
	 * its data or cb are invalid from here on. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
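	/* Example: with nr_slots 128, current_slot 2 and used_slots 4 the
	 * ring has wrapped, so firstused is 2 - 4 + 1 = -1, which maps back
	 * to slot 127. */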
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the actual retry limit done by the
			 * hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

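	/* The hardware reports the slot it will fill next, so the loop
	 * below processes everything from our last position up to, but
	 * not including, that slot. */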
	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}