/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at least 3 bytes of room for moving
	 * the frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}
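	/*
	 * Illustrative budget (actual values come from the queue
	 * descriptor): with data_size = 2432, desc_size = 32 and hardware
	 * crypto, the allocation below is 2464 bytes of frame space plus
	 * 12 bytes of headroom and 8 bytes of tailroom, enough to realign
	 * the frame and reinsert IV/EIV/ICV without reallocating the skb.
	 */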

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame has the requested number of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;
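	/*
	 * Illustration (hypothetical offset): if skb->data sits 2 bytes
	 * past a 4-byte boundary, align is 2; skb_push() grows the head
	 * by 2 bytes, memmove() slides the frame down into the aligned
	 * position, and skb_trim() drops the 2 stale bytes at the tail.
	 */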

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;
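	/*
	 * Worked example (assuming the usual 4-byte L2PAD_SIZE()): a
	 * 26-byte QoS data header gives l2pad = 2, so the payload is
	 * expected at offset 28. When the payload must move further than
	 * the header, the extra 4 bytes added to header_align above keep
	 * both memmove() calls below within the reserved headroom.
	 */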

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
		return;

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock(&intf->seqlock);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
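	/*
	 * The sequence number occupies bits 4-15 of the sequence control
	 * field, hence the 0x10 increment above; the low four bits hold
	 * the fragment number and are preserved by the IEEE80211_SCTL_FRAG
	 * mask.
	 */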

	spin_unlock(&intf->seqlock);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
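		/*
		 * The 12-bit OFDM LENGTH field is split into two 6-bit
		 * halves: e.g. (illustrative) data_length = 100 gives
		 * length_high = 1 and length_low = 36 (100 = 1 * 64 + 36).
		 */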
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}
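		/*
		 * Worked example (illustrative, given GET_DURATION()'s
		 * 100 kbps rate units): a 1404-byte frame at 11 Mbps
		 * (bitrate 110) yields 1404 * 80 / 110 = 1021 us with a
		 * residual of 10, so duration rounds up to 1022 and,
		 * because the residual is <= 30, the length extension
		 * bit in the service field is set.
		 */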

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (tx_info->control.sta) {
		txdesc->u.ht.mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
		    ((tx_info->control.sta->ht_cap.cap &
		      IEEE80211_HT_CAP_SM_PS) >>
		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
		    WLAN_HT_CAP_SM_PS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}
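	/*
	 * For legacy rates the MCS field carries the CCK/OFDM rate index
	 * rather than a true MCS; the 0x08 bit set above is assumed to
	 * signal short preamble, mirroring the PLCP SIGNAL preamble bit
	 * in the non-HT descriptor path.
	 */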

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
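	/*
	 * The zeroed headroom added above is where the driver constructs
	 * its hardware-specific TX descriptor in front of the 802.11
	 * frame (for rt2800, for instance, the TXWI); its size is
	 * advertised through ops->extra_tx_headroom.
	 */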

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00queue_map_txskb(entry);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame that is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
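	/*
	 * Example (hypothetical indices): with limit = 8, index_start = 6
	 * and index_end = 2 the ring has wrapped, so the second branch
	 * below visits entries 6, 7, 0 and 1 in that order.
	 */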

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}
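	/*
	 * Q_INDEX advances when a frame is queued and Q_INDEX_DONE when
	 * one completes, so length tracks how many entries are currently
	 * in flight while count accumulates the lifetime total of
	 * completed frames.
	 */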

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if the driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;
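	/*
	 * E.g. (illustrative) a 64-entry queue gets a threshold of
	 * DIV_ROUND_UP(64, 10) = 7: once fewer than 7 entries remain
	 * available, rt2x00queue_kick_tx_queue() kicks the queue even
	 * for frames carrying the burst flag.
	 */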

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))
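/*
 * Layout of the single allocation above: the __limit queue_entry structs
 * occupy the first __limit * __esize bytes, followed by one __psize-sized
 * driver-private area per entry; the macro resolves the address of entry
 * __index's private area within that trailing region.
 */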

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
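	/*
	 * Resulting layout (illustrative for tx_queues = 4 with an ATIM
	 * queue): queue[0] = RX, queue[1]..queue[4] = TX AC queues,
	 * queue[5] = beacon, queue[6] = ATIM.
	 */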

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}