1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/dmapool.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/tcp.h>
13 #include <net/ip6_checksum.h>
14 #include <net/tso.h>
15
16 #include "fw/api/commands.h"
17 #include "fw/api/datapath.h"
18 #include "fw/api/debug.h"
19 #include "iwl-fh.h"
20 #include "iwl-debug.h"
21 #include "iwl-csr.h"
22 #include "iwl-prph.h"
23 #include "iwl-io.h"
24 #include "iwl-scd.h"
25 #include "iwl-op-mode.h"
26 #include "internal.h"
27 #include "fw/api/tx.h"
28 #include "fw/dbg.h"
29 #include "pcie/utils.h"
30
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing the empty and
 * full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
 * space rises back above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
52
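/*
 * Illustrative sketch (editor's example, not driver code): how the read/write
 * pointer scheme above maps to a free-space calculation for a hypothetical
 * 256-entry queue.  This is a simplification, not the actual iwl_txq_space()
 * implementation; it only demonstrates the "keep at least 2 entries empty"
 * rule described in the comment above.
 *
 *	static int example_txq_free_space(int write_ptr, int read_ptr)
 *	{
 *		const int queue_size = 256;
 *		int used = (write_ptr - read_ptr) & (queue_size - 1);
 *
 *		return queue_size - used - 2;
 *	}
 *
 * E.g. write_ptr = 10, read_ptr = 250 gives used = 16 and 238 free slots,
 * and read_ptr == write_ptr unambiguously means "empty", never "full".
 */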
53
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
55 struct iwl_dma_ptr *ptr, size_t size)
56 {
57 if (WARN_ON(ptr->addr))
58 return -EINVAL;
59
60 ptr->addr = dma_alloc_coherent(trans->dev, size,
61 &ptr->dma, GFP_KERNEL);
62 if (!ptr->addr)
63 return -ENOMEM;
64 ptr->size = size;
65 return 0;
66 }
67
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
69 {
70 if (unlikely(!ptr->addr))
71 return;
72
73 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
74 memset(ptr, 0, sizeof(*ptr));
75 }
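
/*
 * Usage sketch for the two helpers above, mirroring how the keep-warm buffer
 * is handled later in this file (iwl_pcie_tx_alloc()/iwl_pcie_tx_free()):
 *
 *	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
 *	if (ret)
 *		return ret;
 *	...
 *	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
 *
 * iwl_pcie_free_dma_ptr() is safe to call on a never-allocated or
 * already-freed pointer since it checks ptr->addr first.
 */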
76
77 /*
78 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
79 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
81 struct iwl_txq *txq)
82 {
83 u32 reg = 0;
84 int txq_id = txq->id;
85
86 lockdep_assert_held(&txq->lock);
87
	/*
	 * Explicitly wake up the NIC only if:
	 * 1. shadow registers aren't enabled
	 * 2. this is not the command queue (for commands, the NIC is woken
	 *    up outside this function regardless of shadow registers)
	 * 3. there is a chance that the NIC is asleep
	 */
94 if (!trans->mac_cfg->base->shadow_reg_enable &&
95 txq_id != trans->conf.cmd_queue &&
96 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
97 /*
98 * wake up nic if it's powered down ...
99 * uCode will wake up, and interrupt us again, so next
100 * time we'll skip this part.
101 */
102 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
103
104 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
105 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
106 txq_id, reg);
107 iwl_set_bit(trans, CSR_GP_CNTRL,
108 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
109 txq->need_update = true;
110 return;
111 }
112 }
113
114 /*
115 * if not in power-save mode, uCode will never sleep when we're
116 * trying to tx (during RFKILL, we're not trying to tx).
117 */
118 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
119 if (!txq->block)
120 iwl_write32(trans, HBUS_TARG_WRPTR,
121 txq->write_ptr | (txq_id << 8));
122 }
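
/*
 * Example of the HBUS_TARG_WRPTR value written above: the queue id goes into
 * bits 8 and up, the new write index into the low bits.  For a hypothetical
 * queue 4 with write_ptr 0x2a:
 *
 *	iwl_write32(trans, HBUS_TARG_WRPTR, 0x2a | (4 << 8));	// 0x042a
 */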
123
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
125 {
126 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
127 int i;
128
129 for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
130 struct iwl_txq *txq = trans_pcie->txqs.txq[i];
131
132 if (!test_bit(i, trans_pcie->txqs.queue_used))
133 continue;
134
135 spin_lock_bh(&txq->lock);
136 if (txq->need_update) {
137 iwl_pcie_txq_inc_wr_ptr(trans, txq);
138 txq->need_update = false;
139 }
140 spin_unlock_bh(&txq->lock);
141 }
142 }
143
static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
145 u8 idx, dma_addr_t addr, u16 len)
146 {
147 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
148 u16 hi_n_len = len << 4;
149
150 put_unaligned_le32(addr, &tb->lo);
151 hi_n_len |= iwl_get_dma_hi_addr(addr);
152
153 tb->hi_n_len = cpu_to_le16(hi_n_len);
154
155 tfd->num_tbs = idx + 1;
156 }
157
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
159 {
160 return tfd->num_tbs & 0x1f;
161 }
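
/*
 * Worked example of the TB layout used by iwl_pcie_gen1_tfd_set_tb() above:
 * a 36-bit DMA address is split into a 32-bit 'lo' word plus the 4 high
 * address bits, which share 'hi_n_len' with the 12-bit length (len << 4).
 * For a hypothetical addr = 0x3_8000_1000 and len = 0x40:
 *
 *	tb->lo       = 0x80001000;		// low 32 address bits
 *	tb->hi_n_len = (0x40 << 4) | 0x3;	// == 0x0403
 *
 * iwl_txq_gen1_tfd_tb_get_addr() below performs the inverse decode.
 */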
162
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
164 dma_addr_t addr, u16 len, bool reset)
165 {
166 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
167 void *tfd;
168 u32 num_tbs;
169
170 tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
171
172 if (reset)
173 memset(tfd, 0, trans_pcie->txqs.tfd.size);
174
175 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
176
	/* Each TFD can point to a maximum of max_tbs Tx buffers */
178 if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
179 IWL_ERR(trans, "Error can not send more than %d chunks\n",
180 trans_pcie->txqs.tfd.max_tbs);
181 return -EINVAL;
182 }
183
184 if (WARN(addr & ~IWL_TX_DMA_MASK,
185 "Unaligned address = %llx\n", (unsigned long long)addr))
186 return -EINVAL;
187
188 iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
189
190 return num_tbs;
191 }
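
/*
 * Typical usage (a sketch based on the callers later in this file): the first
 * TB always comes from the per-queue first_tb_bufs area and resets the TFD;
 * any further chunk is appended without reset, and the returned TB index is
 * recorded so the unmap path knows how it was mapped:
 *
 *	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, tb0_size, true);
 *	tb_idx = iwl_pcie_txq_build_tfd(trans, txq, frag_phys, frag_len, false);
 *	if (tb_idx >= 0)
 *		out_meta->tbs |= BIT(tb_idx);
 */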
192
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
194 {
195 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
196
197 if (!trans->mac_cfg->base->apmg_wake_up_wa)
198 return;
199
200 spin_lock(&trans_pcie->reg_lock);
201
202 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
203 spin_unlock(&trans_pcie->reg_lock);
204 return;
205 }
206
207 trans_pcie->cmd_hold_nic_awake = false;
208 iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
209 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
210 spin_unlock(&trans_pcie->reg_lock);
211 }
212
static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
214 struct page *page)
215 {
216 struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
217
218 /* Decrease internal use count and unmap/free page if needed */
219 if (refcount_dec_and_test(&info->use_count)) {
220 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
221 DMA_TO_DEVICE);
222
223 __free_page(page);
224 }
225 }
226
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
228 struct iwl_cmd_meta *cmd_meta)
229 {
230 struct page **page_ptr;
231 struct page *next;
232
233 page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
234 next = *page_ptr;
235 *page_ptr = NULL;
236
237 while (next) {
238 struct iwl_tso_page_info *info;
239 struct page *tmp = next;
240
241 info = IWL_TSO_PAGE_INFO(page_address(next));
242 next = info->next;
243
244 /* Unmap the scatter gather list that is on the last page */
245 if (!next && cmd_meta->sg_offset) {
246 struct sg_table *sgt;
247
248 sgt = (void *)((u8 *)page_address(tmp) +
249 cmd_meta->sg_offset);
250
251 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
252 }
253
254 iwl_pcie_free_and_unmap_tso_page(trans, tmp);
255 }
256 }
257
258 static inline dma_addr_t
iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
260 {
261 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
262 dma_addr_t addr;
263 dma_addr_t hi_len;
264
265 addr = get_unaligned_le32(&tb->lo);
266
267 if (sizeof(dma_addr_t) <= sizeof(u32))
268 return addr;
269
270 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
271
272 /*
273 * shift by 16 twice to avoid warnings on 32-bit
274 * (where this code never runs anyway due to the
275 * if statement above)
276 */
277 return addr | ((hi_len << 16) << 16);
278 }
279
static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
281 struct iwl_tfd *tfd)
282 {
283 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
284
285 tfd->num_tbs = 0;
286
287 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
288 trans_pcie->invalid_tx_cmd.size);
289 }
290
static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
292 struct iwl_cmd_meta *meta,
293 struct iwl_txq *txq, int index)
294 {
295 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
296 int i, num_tbs;
297 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
298
299 /* Sanity check on number of chunks */
300 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
301
302 if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
303 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* TODO: issue a fatal error, this is quite a serious situation */
305 return;
306 }
307
308 /* TB1 is mapped directly, the rest is the TSO page and SG list. */
309 if (meta->sg_offset)
310 num_tbs = 2;
311
312 /* first TB is never freed - it's the bidirectional DMA data */
313
314 for (i = 1; i < num_tbs; i++) {
315 if (meta->tbs & BIT(i))
316 dma_unmap_page(trans->dev,
317 iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
318 iwl_txq_gen1_tfd_tb_get_len(trans,
319 tfd, i),
320 DMA_TO_DEVICE);
321 else
322 dma_unmap_single(trans->dev,
323 iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
324 iwl_txq_gen1_tfd_tb_get_len(trans,
325 tfd, i),
326 DMA_TO_DEVICE);
327 }
328
329 meta->tbs = 0;
330
331 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
332 }
333
334 /**
335 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
336 * @trans: transport private data
337 * @txq: tx queue
338 * @read_ptr: the TXQ read_ptr to free
339 *
340 * Does NOT advance any TFD circular buffer read/write indexes
341 * Does NOT free the TFD itself (which is within circular buffer)
342 */
static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
344 int read_ptr)
345 {
346 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
347 * idx is bounded by n_window
348 */
349 int idx = iwl_txq_get_cmd_index(txq, read_ptr);
350 struct sk_buff *skb;
351
352 lockdep_assert_held(&txq->reclaim_lock);
353
354 if (!txq->entries)
355 return;
356
357 /* We have only q->n_window txq->entries, but we use
358 * TFD_QUEUE_SIZE_MAX tfds
359 */
360 if (trans->mac_cfg->gen2)
361 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
362 iwl_txq_get_tfd(trans, txq, read_ptr));
363 else
364 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
365 txq, read_ptr);
366
367 /* free SKB */
368 skb = txq->entries[idx].skb;
369
	/* Can be called from an irqs-disabled context.
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb.
	 */
374 if (skb) {
375 iwl_op_mode_free_skb(trans->op_mode, skb);
376 txq->entries[idx].skb = NULL;
377 }
378 }
379
380 /*
381 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
382 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
384 {
385 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
386 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
387
388 if (!txq) {
389 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
390 return;
391 }
392
393 spin_lock_bh(&txq->reclaim_lock);
394 spin_lock(&txq->lock);
395 while (txq->write_ptr != txq->read_ptr) {
396 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
397 txq_id, txq->read_ptr);
398
399 if (txq_id != trans->conf.cmd_queue) {
400 struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
401 struct iwl_cmd_meta *cmd_meta =
402 &txq->entries[txq->read_ptr].meta;
403
404 if (WARN_ON_ONCE(!skb))
405 continue;
406
407 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
408 }
409 iwl_txq_free_tfd(trans, txq, txq->read_ptr);
410 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
411
412 if (txq->read_ptr == txq->write_ptr &&
413 txq_id == trans->conf.cmd_queue)
414 iwl_pcie_clear_cmd_in_flight(trans);
415 }
416
417 while (!skb_queue_empty(&txq->overflow_q)) {
418 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
419
420 iwl_op_mode_free_skb(trans->op_mode, skb);
421 }
422
423 spin_unlock(&txq->lock);
424 spin_unlock_bh(&txq->reclaim_lock);
425
426 /* just in case - this queue may have been stopped */
427 iwl_trans_pcie_wake_queue(trans, txq);
428 }
429
430 /*
431 * iwl_pcie_txq_free - Deallocate DMA queue.
432 * @txq: Transmit queue to deallocate.
433 *
434 * Empty queue by removing and destroying all BD's.
435 * Free all buffers.
436 * 0-fill, but do not free "txq" descriptor structure.
437 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
439 {
440 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
441 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
442 struct device *dev = trans->dev;
443 int i;
444
445 if (WARN_ON(!txq))
446 return;
447
448 iwl_pcie_txq_unmap(trans, txq_id);
449
450 /* De-alloc array of command/tx buffers */
451 if (txq_id == trans->conf.cmd_queue)
452 for (i = 0; i < txq->n_window; i++) {
453 kfree_sensitive(txq->entries[i].cmd);
454 kfree_sensitive(txq->entries[i].free_buf);
455 }
456
457 /* De-alloc circular buffer of TFDs */
458 if (txq->tfds) {
459 dma_free_coherent(dev,
460 trans_pcie->txqs.tfd.size *
461 trans->mac_cfg->base->max_tfd_queue_size,
462 txq->tfds, txq->dma_addr);
463 txq->dma_addr = 0;
464 txq->tfds = NULL;
465
466 dma_free_coherent(dev,
467 sizeof(*txq->first_tb_bufs) * txq->n_window,
468 txq->first_tb_bufs, txq->first_tb_dma);
469 }
470
471 kfree(txq->entries);
472 txq->entries = NULL;
473
474 timer_delete_sync(&txq->stuck_timer);
475
476 /* 0-fill queue descriptor structure */
477 memset(txq, 0, sizeof(*txq));
478 }
479
void iwl_pcie_tx_start(struct iwl_trans *trans)
481 {
482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483 int nq = trans->mac_cfg->base->num_of_queues;
484 int chan;
485 u32 reg_val;
486 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
487 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
488
	/* make sure all queues are not stopped/used */
490 memset(trans_pcie->txqs.queue_stopped, 0,
491 sizeof(trans_pcie->txqs.queue_stopped));
492 memset(trans_pcie->txqs.queue_used, 0,
493 sizeof(trans_pcie->txqs.queue_used));
494
495 trans_pcie->scd_base_addr =
496 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
497
498 /* reset context data, TX status and translation data */
499 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
500 SCD_CONTEXT_MEM_LOWER_BOUND,
501 NULL, clear_dwords);
502
503 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
504 trans_pcie->txqs.scd_bc_tbls.dma >> 10);
505
506 /* The chain extension of the SCD doesn't work well. This feature is
507 * enabled by default by the HW, so we need to disable it manually.
508 */
509 if (trans->mac_cfg->base->scd_chain_ext_wa)
510 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
511
512 iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
513 trans->conf.cmd_fifo,
514 IWL_DEF_WD_TIMEOUT);
515
516 /* Activate all Tx DMA/FIFO channels */
517 iwl_scd_activate_fifos(trans);
518
519 /* Enable DMA channel */
520 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
521 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
522 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
523 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
524
525 /* Update FH chicken bits */
526 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
527 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
528 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
529
530 /* Enable L1-Active */
531 if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
532 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
533 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
534 }
535
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
537 {
538 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
539 int txq_id;
540
	/*
	 * We should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses.
	 */
545 if (WARN_ON_ONCE(trans->mac_cfg->gen2))
546 return;
547
548 for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
549 txq_id++) {
550 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
551 if (trans->mac_cfg->gen2)
552 iwl_write_direct64(trans,
553 FH_MEM_CBBC_QUEUE(trans, txq_id),
554 txq->dma_addr);
555 else
556 iwl_write_direct32(trans,
557 FH_MEM_CBBC_QUEUE(trans, txq_id),
558 txq->dma_addr >> 8);
559 iwl_pcie_txq_unmap(trans, txq_id);
560 txq->read_ptr = 0;
561 txq->write_ptr = 0;
562 }
563
564 /* Tell NIC where to find the "keep warm" buffer */
565 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
566 trans_pcie->kw.dma >> 4);
567
	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
573 iwl_pcie_tx_start(trans);
574 }
575
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
577 {
578 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
579 int ch, ret;
580 u32 mask = 0;
581
582 spin_lock_bh(&trans_pcie->irq_lock);
583
584 if (!iwl_trans_grab_nic_access(trans))
585 goto out;
586
587 /* Stop each Tx DMA channel */
588 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
589 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
590 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
591 }
592
593 /* Wait for DMA channels to be idle */
594 ret = iwl_poll_bits(trans, FH_TSSR_TX_STATUS_REG, mask, 5000);
595 if (ret)
596 IWL_ERR(trans,
597 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
598 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
599
600 iwl_trans_release_nic_access(trans);
601
602 out:
603 spin_unlock_bh(&trans_pcie->irq_lock);
604 }
605
606 /*
607 * iwl_pcie_tx_stop - Stop all Tx DMA channels
608 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
610 {
611 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
612 int txq_id;
613
614 /* Turn off all Tx DMA fifos */
615 iwl_scd_deactivate_fifos(trans);
616
617 /* Turn off all Tx DMA channels */
618 iwl_pcie_tx_stop_fh(trans);
619
620 /*
621 * This function can be called before the op_mode disabled the
622 * queues. This happens when we have an rfkill interrupt.
623 * Since we stop Tx altogether - mark the queues as stopped.
624 */
625 memset(trans_pcie->txqs.queue_stopped, 0,
626 sizeof(trans_pcie->txqs.queue_stopped));
627 memset(trans_pcie->txqs.queue_used, 0,
628 sizeof(trans_pcie->txqs.queue_used));
629
630 /* This can happen: start_hw, stop_device */
631 if (!trans_pcie->txq_memory)
632 return 0;
633
634 /* Unmap DMA from host system and free skb's */
635 for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
636 txq_id++)
637 iwl_pcie_txq_unmap(trans, txq_id);
638
639 return 0;
640 }
641
642 /*
643 * iwl_trans_tx_free - Free TXQ Context
644 *
645 * Destroy all TX DMA queues and structures
646 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
648 {
649 int txq_id;
650 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
651
652 memset(trans_pcie->txqs.queue_used, 0,
653 sizeof(trans_pcie->txqs.queue_used));
654
655 /* Tx queues */
656 if (trans_pcie->txq_memory) {
657 for (txq_id = 0;
658 txq_id < trans->mac_cfg->base->num_of_queues;
659 txq_id++) {
660 iwl_pcie_txq_free(trans, txq_id);
661 trans_pcie->txqs.txq[txq_id] = NULL;
662 }
663 }
664
665 kfree(trans_pcie->txq_memory);
666 trans_pcie->txq_memory = NULL;
667
668 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
669
670 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
671 }
672
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
674 {
675 u32 txq_id = txq->id;
676 u32 status;
677 bool active;
678 u8 fifo;
679
680 if (trans->mac_cfg->gen2) {
681 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
682 txq->read_ptr, txq->write_ptr);
683 /* TODO: access new SCD registers and dump them */
684 return;
685 }
686
687 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
688 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
689 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
690
691 IWL_ERR(trans,
692 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
693 txq_id, active ? "" : "in", fifo,
694 jiffies_to_msecs(txq->wd_timeout),
695 txq->read_ptr, txq->write_ptr,
696 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
697 (trans->mac_cfg->base->max_tfd_queue_size - 1),
698 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
699 (trans->mac_cfg->base->max_tfd_queue_size - 1),
700 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
701 }
702
static void iwl_txq_stuck_timer(struct timer_list *t)
704 {
705 struct iwl_txq *txq = timer_container_of(txq, t, stuck_timer);
706 struct iwl_trans *trans = txq->trans;
707
708 spin_lock(&txq->lock);
709 /* check if triggered erroneously */
710 if (txq->read_ptr == txq->write_ptr) {
711 spin_unlock(&txq->lock);
712 return;
713 }
714 spin_unlock(&txq->lock);
715
716 iwl_txq_log_scd_error(trans, txq);
717
718 iwl_force_nmi(trans);
719 }
720
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
722 int slots_num, bool cmd_queue)
723 {
724 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
725 size_t num_entries = trans->mac_cfg->gen2 ?
726 slots_num : trans->mac_cfg->base->max_tfd_queue_size;
727 size_t tfd_sz;
728 size_t tb0_buf_sz;
729 int i;
730
731 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
732 return -EINVAL;
733
734 if (WARN_ON(txq->entries || txq->tfds))
735 return -EINVAL;
736
737 tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
738
739 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
740 txq->trans = trans;
741
742 txq->n_window = slots_num;
743
744 txq->entries = kcalloc(slots_num,
745 sizeof(struct iwl_pcie_txq_entry),
746 GFP_KERNEL);
747
748 if (!txq->entries)
749 goto error;
750
751 if (cmd_queue)
752 for (i = 0; i < slots_num; i++) {
753 txq->entries[i].cmd =
754 kmalloc(sizeof(struct iwl_device_cmd),
755 GFP_KERNEL);
756 if (!txq->entries[i].cmd)
757 goto error;
758 }
759
760 /* Circular buffer of transmit frame descriptors (TFDs),
761 * shared with device
762 */
763 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
764 &txq->dma_addr, GFP_KERNEL);
765 if (!txq->tfds)
766 goto error;
767
768 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
769
770 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
771
772 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
773 &txq->first_tb_dma,
774 GFP_KERNEL);
775 if (!txq->first_tb_bufs)
776 goto err_free_tfds;
777
778 for (i = 0; i < num_entries; i++) {
779 void *tfd = iwl_txq_get_tfd(trans, txq, i);
780
781 if (trans->mac_cfg->gen2)
782 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
783 else
784 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
785 }
786
787 return 0;
788 err_free_tfds:
789 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
790 txq->tfds = NULL;
791 error:
792 if (txq->entries && cmd_queue)
793 for (i = 0; i < slots_num; i++)
794 kfree(txq->entries[i].cmd);
795 kfree(txq->entries);
796 txq->entries = NULL;
797
798 return -ENOMEM;
799 }
800
801 #define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
802
803 /*
804 * iwl_pcie_tx_alloc - allocate TX context
805 * Allocate all Tx DMA structures and initialize them
806 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
808 {
809 int ret;
810 int txq_id, slots_num;
811 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
812 u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;
813
814 if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
815 return -EINVAL;
816
817 bc_tbls_size *= BC_TABLE_SIZE;
818
	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
821 if (WARN_ON(trans_pcie->txq_memory)) {
822 ret = -EINVAL;
823 goto error;
824 }
825
826 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
827 bc_tbls_size);
828 if (ret) {
829 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
830 goto error;
831 }
832
833 /* Alloc keep-warm buffer */
834 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
835 if (ret) {
836 IWL_ERR(trans, "Keep Warm allocation failed\n");
837 goto error;
838 }
839
840 trans_pcie->txq_memory =
841 kcalloc(trans->mac_cfg->base->num_of_queues,
842 sizeof(struct iwl_txq), GFP_KERNEL);
843 if (!trans_pcie->txq_memory) {
844 IWL_ERR(trans, "Not enough memory for txq\n");
845 ret = -ENOMEM;
846 goto error;
847 }
848
849 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
850 for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
851 txq_id++) {
852 bool cmd_queue = (txq_id == trans->conf.cmd_queue);
853
854 if (cmd_queue)
855 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
856 trans->mac_cfg->base->min_txq_size);
857 else
858 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
859 trans->mac_cfg->base->min_ba_txq_size);
860 trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
861 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
862 slots_num, cmd_queue);
863 if (ret) {
864 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
865 goto error;
866 }
867 trans_pcie->txqs.txq[txq_id]->id = txq_id;
868 }
869
870 return 0;
871
872 error:
873 iwl_pcie_tx_free(trans);
874
875 return ret;
876 }
877
878 /*
879 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
880 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
882 {
883 q->n_window = slots_num;
884
885 /* slots_num must be power-of-two size, otherwise
886 * iwl_txq_get_cmd_index is broken.
887 */
888 if (WARN_ON(!is_power_of_2(slots_num)))
889 return -EINVAL;
890
891 q->low_mark = q->n_window / 4;
892 if (q->low_mark < 4)
893 q->low_mark = 4;
894
895 q->high_mark = q->n_window / 8;
896 if (q->high_mark < 2)
897 q->high_mark = 2;
898
899 q->write_ptr = 0;
900 q->read_ptr = 0;
901
902 return 0;
903 }
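
/*
 * Example of the watermarks computed above: for slots_num = 32 this gives
 * low_mark = 8 and high_mark = 4, while for a small slots_num = 8 both are
 * clamped to their minimums (low_mark = 4, high_mark = 2).
 */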
904
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
906 int slots_num, bool cmd_queue)
907 {
908 u32 tfd_queue_max_size =
909 trans->mac_cfg->base->max_tfd_queue_size;
910 int ret;
911
912 txq->need_update = false;
913
914 /* max_tfd_queue_size must be power-of-two size, otherwise
915 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
916 */
917 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
918 "Max tfd queue size must be a power of two, but is %d",
919 tfd_queue_max_size))
920 return -EINVAL;
921
922 /* Initialize queue's high/low-water marks, and head/tail indexes */
923 ret = iwl_queue_init(txq, slots_num);
924 if (ret)
925 return ret;
926
927 spin_lock_init(&txq->lock);
928 spin_lock_init(&txq->reclaim_lock);
929
930 if (cmd_queue) {
931 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
932
933 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
934 }
935
936 __skb_queue_head_init(&txq->overflow_q);
937
938 return 0;
939 }
940
int iwl_pcie_tx_init(struct iwl_trans *trans)
942 {
943 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
944 int ret;
945 int txq_id, slots_num;
946 bool alloc = false;
947
948 if (!trans_pcie->txq_memory) {
949 ret = iwl_pcie_tx_alloc(trans);
950 if (ret)
951 goto error;
952 alloc = true;
953 }
954
955 spin_lock_bh(&trans_pcie->irq_lock);
956
957 /* Turn off all Tx DMA fifos */
958 iwl_scd_deactivate_fifos(trans);
959
960 /* Tell NIC where to find the "keep warm" buffer */
961 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
962 trans_pcie->kw.dma >> 4);
963
964 spin_unlock_bh(&trans_pcie->irq_lock);
965
966 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
967 for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
968 txq_id++) {
969 bool cmd_queue = (txq_id == trans->conf.cmd_queue);
970
971 if (cmd_queue)
972 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
973 trans->mac_cfg->base->min_txq_size);
974 else
975 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
976 trans->mac_cfg->base->min_ba_txq_size);
977 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
978 cmd_queue);
979 if (ret) {
980 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
981 goto error;
982 }
983
984 /*
985 * Tell nic where to find circular buffer of TFDs for a
986 * given Tx queue, and enable the DMA channel used for that
987 * queue.
988 * Circular buffer (TFD queue in DRAM) physical base address
989 */
990 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
991 trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
992 }
993
994 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
995 if (trans->mac_cfg->base->num_of_queues > 20)
996 iwl_set_bits_prph(trans, SCD_GP_CTRL,
997 SCD_GP_CTRL_ENABLE_31_QUEUES);
998
999 return 0;
1000 error:
	/* Upon error, free only if we allocated something */
1002 if (alloc)
1003 iwl_pcie_tx_free(trans);
1004 return ret;
1005 }
1006
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1008 const struct iwl_host_cmd *cmd)
1009 {
1010 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1011
1012 /* Make sure the NIC is still alive in the bus */
1013 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1014 return -ENODEV;
1015
1016 if (!trans->mac_cfg->base->apmg_wake_up_wa)
1017 return 0;
1018
1019 /*
1020 * wake up the NIC to make sure that the firmware will see the host
1021 * command - we will let the NIC sleep once all the host commands
1022 * returned. This needs to be done only on NICs that have
1023 * apmg_wake_up_wa set (see above.)
1024 */
1025 if (!_iwl_trans_pcie_grab_nic_access(trans, false))
1026 return -EIO;
1027
1028 /*
1029 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
1030 * There, we also returned immediately if cmd_hold_nic_awake is
1031 * already true, so it's OK to unconditionally set it to true.
1032 */
1033 trans_pcie->cmd_hold_nic_awake = true;
1034 spin_unlock(&trans_pcie->reg_lock);
1035
1036 return 0;
1037 }
1038
static void iwl_txq_progress(struct iwl_txq *txq)
1040 {
1041 lockdep_assert_held(&txq->lock);
1042
1043 if (!txq->wd_timeout)
1044 return;
1045
1046 /*
1047 * station is asleep and we send data - that must
1048 * be uAPSD or PS-Poll. Don't rearm the timer.
1049 */
1050 if (txq->frozen)
1051 return;
1052
1053 /*
1054 * if empty delete timer, otherwise move timer forward
1055 * since we're making progress on this queue
1056 */
1057 if (txq->read_ptr == txq->write_ptr)
1058 timer_delete(&txq->stuck_timer);
1059 else
1060 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1061 }
1062
static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
1064 int read_ptr, int write_ptr)
1065 {
1066 int index = iwl_txq_get_cmd_index(q, i);
1067 int r = iwl_txq_get_cmd_index(q, read_ptr);
1068 int w = iwl_txq_get_cmd_index(q, write_ptr);
1069
1070 return w >= r ?
1071 (index >= r && index < w) :
1072 !(index < r && index >= w);
1073 }
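
/*
 * Example of the wrap-aware check above, assuming a 256-entry window so the
 * command-index masking is a no-op: with read_ptr = 250 and write_ptr = 10
 * (w < r, i.e. the used region wraps), index 253 is reported as used while
 * index 20 is not:
 *
 *	iwl_txq_used(q, 253, 250, 10);	// true  - inside [250..255, 0..9]
 *	iwl_txq_used(q, 20, 250, 10);	// false - in the free region
 */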
1074
1075 /*
1076 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1077 *
 * When the FW advances the 'R' index, all entries between the old and new
 * 'R' index need to be reclaimed. As a result, some free space forms. If
 * there is enough free space (> low mark), wake the stack that feeds us.
1081 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1083 {
1084 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1085 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1086 int nfreed = 0;
1087 u16 r;
1088
1089 lockdep_assert_held(&txq->lock);
1090
1091 idx = iwl_txq_get_cmd_index(txq, idx);
1092 r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1093
1094 if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||
1095 (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
1096 WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
1097 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1098 __func__, txq_id, idx,
1099 trans->mac_cfg->base->max_tfd_queue_size,
1100 txq->write_ptr, txq->read_ptr);
1101 return;
1102 }
1103
1104 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
1105 r = iwl_txq_inc_wrap(trans, r)) {
1106 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
1107
1108 if (nfreed++ > 0) {
1109 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1110 idx, txq->write_ptr, r);
1111 iwl_force_nmi(trans);
1112 }
1113 }
1114
1115 if (txq->read_ptr == txq->write_ptr)
1116 iwl_pcie_clear_cmd_in_flight(trans);
1117
1118 iwl_txq_progress(txq);
1119 }
1120
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1122 u16 txq_id)
1123 {
1124 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1125 u32 tbl_dw_addr;
1126 u32 tbl_dw;
1127 u16 scd_q2ratid;
1128
1129 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1130
1131 tbl_dw_addr = trans_pcie->scd_base_addr +
1132 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1133
1134 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
1135
1136 if (txq_id & 0x1)
1137 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1138 else
1139 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1140
1141 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
1142
1143 return 0;
1144 }
1145
1146 /* Receiver address (actually, Rx station's index into station table),
1147 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1148 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
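
/*
 * Example: for a hypothetical station index 3 and TID 5, BUILD_RAxTID(3, 5)
 * yields (3 << 4) + 5 = 0x35.  In iwl_pcie_txq_set_ratid_map() above, that
 * value lands in the upper 16 bits of the translation-table dword for an odd
 * txq_id and in the lower 16 bits for an even one, since the table packs two
 * queues per u32.
 */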
1149
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1151 const struct iwl_trans_txq_scd_cfg *cfg,
1152 unsigned int wdg_timeout)
1153 {
1154 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1155 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1156 int fifo = -1;
1157 bool scd_bug = false;
1158
1159 if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
1160 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1161
1162 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1163
1164 if (cfg) {
1165 fifo = cfg->fifo;
1166
1167 /* Disable the scheduler prior configuring the cmd queue */
1168 if (txq_id == trans->conf.cmd_queue &&
1169 trans->conf.scd_set_active)
1170 iwl_scd_enable_set_active(trans, 0);
1171
1172 /* Stop this Tx queue before configuring it */
1173 iwl_scd_txq_set_inactive(trans, txq_id);
1174
1175 /* Set this queue as a chain-building queue unless it is CMD */
1176 if (txq_id != trans->conf.cmd_queue)
1177 iwl_scd_txq_set_chain(trans, txq_id);
1178
1179 if (cfg->aggregate) {
1180 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1181
1182 /* Map receiver-address / traffic-ID to this queue */
1183 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1184
1185 /* enable aggregations for the queue */
1186 iwl_scd_txq_enable_agg(trans, txq_id);
1187 txq->ampdu = true;
1188 } else {
1189 /*
1190 * disable aggregations for the queue, this will also
1191 * make the ra_tid mapping configuration irrelevant
1192 * since it is now a non-AGG queue.
1193 */
1194 iwl_scd_txq_disable_agg(trans, txq_id);
1195
1196 ssn = txq->read_ptr;
1197 }
1198 } else {
1199 /*
1200 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
1203 * Do this only in case cfg is NULL since this trick can
1204 * be done only if we have DQA enabled which is true for mvm
1205 * only. And mvm never sets a cfg pointer.
1206 * This is really ugly, but this is the easiest way out for
1207 * this sad hardware issue.
1208 * This bug has been fixed on devices 9000 and up.
1209 */
1210 scd_bug = !trans->mac_cfg->mq_rx_supported &&
1211 !((ssn - txq->write_ptr) & 0x3f) &&
1212 (ssn != txq->write_ptr);
1213 if (scd_bug)
1214 ssn++;
1215 }
1216
1217 /* Place first TFD at index corresponding to start sequence number.
1218 * Assumes that ssn_idx is valid (!= 0xFFF) */
1219 txq->read_ptr = (ssn & 0xff);
1220 txq->write_ptr = (ssn & 0xff);
1221 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1222 (ssn & 0xff) | (txq_id << 8));
1223
1224 if (cfg) {
1225 u8 frame_limit = cfg->frame_limit;
1226
1227 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1228
1229 /* Set up Tx window size and frame limit for this queue */
1230 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1231 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
1232 iwl_trans_write_mem32(trans,
1233 trans_pcie->scd_base_addr +
1234 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1235 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
1236 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
1237
1238 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
1239 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1240 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1241 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1242 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
1243 SCD_QUEUE_STTS_REG_MSK);
1244
1245 /* enable the scheduler for this queue (only) */
1246 if (txq_id == trans->conf.cmd_queue &&
1247 trans->conf.scd_set_active)
1248 iwl_scd_enable_set_active(trans, BIT(txq_id));
1249
1250 IWL_DEBUG_TX_QUEUES(trans,
1251 "Activate queue %d on FIFO %d WrPtr: %d\n",
1252 txq_id, fifo, ssn & 0xff);
1253 } else {
1254 IWL_DEBUG_TX_QUEUES(trans,
1255 "Activate queue %d WrPtr: %d\n",
1256 txq_id, ssn & 0xff);
1257 }
1258
1259 return scd_bug;
1260 }
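
/*
 * Example of the SCD write-pointer workaround above (non-mq_rx devices with
 * cfg == NULL): if the queue's write_ptr is 0x20 and the requested ssn is
 * 0x60, the pointer would move by exactly 0x40, which can hang the SCD.
 * The code then bumps ssn to 0x61 and returns true so the op_mode can
 * account for the skipped entry.
 */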
1261
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
1263 bool shared_mode)
1264 {
1265 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1266 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1267
1268 txq->ampdu = !shared_mode;
1269 }
1270
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1272 bool configure_scd)
1273 {
1274 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1275 u32 stts_addr = trans_pcie->scd_base_addr +
1276 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1277 static const u32 zero_val[4] = {};
1278
1279 trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
1280 trans_pcie->txqs.txq[txq_id]->frozen = false;
1281
1282 /*
1283 * Upon HW Rfkill - we stop the device, and then stop the queues
1284 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1285 * allow the op_mode to call txq_disable after it already called
1286 * stop_device.
1287 */
1288 if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
1289 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1290 "queue %d not used", txq_id);
1291 return;
1292 }
1293
1294 if (configure_scd) {
1295 iwl_scd_txq_set_inactive(trans, txq_id);
1296
1297 iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
1298 ARRAY_SIZE(zero_val));
1299 }
1300
1301 iwl_pcie_txq_unmap(trans, txq_id);
1302 trans_pcie->txqs.txq[txq_id]->ampdu = false;
1303
1304 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1305 }
1306
1307 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
1308
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
1310 {
1311 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1312 int i;
1313
1314 for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
1315 struct iwl_txq *txq = trans_pcie->txqs.txq[i];
1316
1317 if (i == trans->conf.cmd_queue)
1318 continue;
1319
1320 /* we skip the command queue (obviously) so it's OK to nest */
1321 spin_lock_nested(&txq->lock, 1);
1322
1323 if (!block && !(WARN_ON_ONCE(!txq->block))) {
1324 txq->block--;
1325 if (!txq->block) {
1326 iwl_write32(trans, HBUS_TARG_WRPTR,
1327 txq->write_ptr | (i << 8));
1328 }
1329 } else if (block) {
1330 txq->block++;
1331 }
1332
1333 spin_unlock(&txq->lock);
1334 }
1335 }
1336
1337 /*
1338 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
1340 * @cmd: a pointer to the ucode command structure
1341 *
1342 * The function returns < 0 values to indicate the operation
1343 * failed. On success, it returns the index (>= 0) of command in the
1344 * command queue.
1345 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1347 struct iwl_host_cmd *cmd)
1348 {
1349 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1350 struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
1351 struct iwl_device_cmd *out_cmd;
1352 struct iwl_cmd_meta *out_meta;
1353 void *dup_buf = NULL;
1354 dma_addr_t phys_addr;
1355 int idx;
1356 u16 copy_size, cmd_size, tb0_size;
1357 bool had_nocopy = false;
1358 u8 group_id = iwl_cmd_groupid(cmd->id);
1359 int i, ret;
1360 u32 cmd_pos;
1361 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1362 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1363 unsigned long flags;
1364
1365 if (WARN(!trans->conf.wide_cmd_header &&
1366 group_id > IWL_ALWAYS_LONG_GROUP,
1367 "unsupported wide command %#x\n", cmd->id))
1368 return -EINVAL;
1369
1370 if (group_id != 0) {
1371 copy_size = sizeof(struct iwl_cmd_header_wide);
1372 cmd_size = sizeof(struct iwl_cmd_header_wide);
1373 } else {
1374 copy_size = sizeof(struct iwl_cmd_header);
1375 cmd_size = sizeof(struct iwl_cmd_header);
1376 }
1377
1378 /* need one for the header if the first is NOCOPY */
1379 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1380
1381 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1382 cmddata[i] = cmd->data[i];
1383 cmdlen[i] = cmd->len[i];
1384
1385 if (!cmd->len[i])
1386 continue;
1387
1388 /* need at least IWL_FIRST_TB_SIZE copied */
1389 if (copy_size < IWL_FIRST_TB_SIZE) {
1390 int copy = IWL_FIRST_TB_SIZE - copy_size;
1391
1392 if (copy > cmdlen[i])
1393 copy = cmdlen[i];
1394 cmdlen[i] -= copy;
1395 cmddata[i] += copy;
1396 copy_size += copy;
1397 }
1398
1399 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1400 had_nocopy = true;
1401 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1402 idx = -EINVAL;
1403 goto free_dup_buf;
1404 }
1405 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1406 /*
1407 * This is also a chunk that isn't copied
1408 * to the static buffer so set had_nocopy.
1409 */
1410 had_nocopy = true;
1411
1412 /* only allowed once */
1413 if (WARN_ON(dup_buf)) {
1414 idx = -EINVAL;
1415 goto free_dup_buf;
1416 }
1417
1418 dup_buf = kmemdup(cmddata[i], cmdlen[i],
1419 GFP_ATOMIC);
1420 if (!dup_buf)
1421 return -ENOMEM;
1422 } else {
1423 /* NOCOPY must not be followed by normal! */
1424 if (WARN_ON(had_nocopy)) {
1425 idx = -EINVAL;
1426 goto free_dup_buf;
1427 }
1428 copy_size += cmdlen[i];
1429 }
1430 cmd_size += cmd->len[i];
1431 }
1432
1433 /*
1434 * If any of the command structures end up being larger than
1435 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
1436 * allocated into separate TFDs, then we will need to
1437 * increase the size of the buffers.
1438 */
1439 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
1440 "Command %s (%#x) is too large (%d bytes)\n",
1441 iwl_get_cmd_string(trans, cmd->id),
1442 cmd->id, copy_size)) {
1443 idx = -EINVAL;
1444 goto free_dup_buf;
1445 }
1446
1447 spin_lock_irqsave(&txq->lock, flags);
1448
1449 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1450 spin_unlock_irqrestore(&txq->lock, flags);
1451
1452 IWL_ERR(trans, "No space in command queue\n");
1453 iwl_op_mode_nic_error(trans->op_mode,
1454 IWL_ERR_TYPE_CMD_QUEUE_FULL);
1455 iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
1456 idx = -ENOSPC;
1457 goto free_dup_buf;
1458 }
1459
1460 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1461 out_cmd = txq->entries[idx].cmd;
1462 out_meta = &txq->entries[idx].meta;
1463
1464 /* re-initialize, this also marks the SG list as unused */
1465 memset(out_meta, 0, sizeof(*out_meta));
1466 if (cmd->flags & CMD_WANT_SKB)
1467 out_meta->source = cmd;
1468
1469 /* set up the header */
1470 if (group_id != 0) {
1471 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1472 out_cmd->hdr_wide.group_id = group_id;
1473 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1474 out_cmd->hdr_wide.length =
1475 cpu_to_le16(cmd_size -
1476 sizeof(struct iwl_cmd_header_wide));
1477 out_cmd->hdr_wide.reserved = 0;
1478 out_cmd->hdr_wide.sequence =
1479 cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
1480 INDEX_TO_SEQ(txq->write_ptr));
1481
1482 cmd_pos = sizeof(struct iwl_cmd_header_wide);
1483 copy_size = sizeof(struct iwl_cmd_header_wide);
1484 } else {
1485 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1486 out_cmd->hdr.sequence =
1487 cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
1488 INDEX_TO_SEQ(txq->write_ptr));
1489 out_cmd->hdr.group_id = 0;
1490
1491 cmd_pos = sizeof(struct iwl_cmd_header);
1492 copy_size = sizeof(struct iwl_cmd_header);
1493 }
1494
1495 /* and copy the data that needs to be copied */
1496 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1497 int copy;
1498
1499 if (!cmd->len[i])
1500 continue;
1501
1502 /* copy everything if not nocopy/dup */
1503 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1504 IWL_HCMD_DFL_DUP))) {
1505 copy = cmd->len[i];
1506
1507 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1508 cmd_pos += copy;
1509 copy_size += copy;
1510 continue;
1511 }
1512
1513 /*
1514 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
1515 * in total (for bi-directional DMA), but copy up to what
1516 * we can fit into the payload for debug dump purposes.
1517 */
1518 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1519
1520 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1521 cmd_pos += copy;
1522
1523 /* However, treat copy_size the proper way, we need it below */
1524 if (copy_size < IWL_FIRST_TB_SIZE) {
1525 copy = IWL_FIRST_TB_SIZE - copy_size;
1526
1527 if (copy > cmd->len[i])
1528 copy = cmd->len[i];
1529 copy_size += copy;
1530 }
1531 }
1532
1533 IWL_DEBUG_HC(trans,
1534 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1535 iwl_get_cmd_string(trans, cmd->id),
1536 group_id, out_cmd->hdr.cmd,
1537 le16_to_cpu(out_cmd->hdr.sequence),
1538 cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
1539
1540 /* start the TFD with the minimum copy bytes */
1541 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
1542 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1543 iwl_pcie_txq_build_tfd(trans, txq,
1544 iwl_txq_get_first_tb_dma(txq, idx),
1545 tb0_size, true);
1546
1547 /* map first command fragment, if any remains */
1548 if (copy_size > tb0_size) {
1549 phys_addr = dma_map_single(trans->dev,
1550 ((u8 *)&out_cmd->hdr) + tb0_size,
1551 copy_size - tb0_size,
1552 DMA_TO_DEVICE);
1553 if (dma_mapping_error(trans->dev, phys_addr)) {
1554 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1555 txq->write_ptr);
1556 idx = -ENOMEM;
1557 goto out;
1558 }
1559
1560 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1561 copy_size - tb0_size, false);
1562 }
1563
1564 /* map the remaining (adjusted) nocopy/dup fragments */
1565 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1566 void *data = (void *)(uintptr_t)cmddata[i];
1567
1568 if (!cmdlen[i])
1569 continue;
1570 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1571 IWL_HCMD_DFL_DUP)))
1572 continue;
1573 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1574 data = dup_buf;
1575 phys_addr = dma_map_single(trans->dev, data,
1576 cmdlen[i], DMA_TO_DEVICE);
1577 if (dma_mapping_error(trans->dev, phys_addr)) {
1578 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1579 txq->write_ptr);
1580 idx = -ENOMEM;
1581 goto out;
1582 }
1583
1584 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1585 }
1586
1587 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1588 out_meta->flags = cmd->flags;
1589 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1590 kfree_sensitive(txq->entries[idx].free_buf);
1591 txq->entries[idx].free_buf = dup_buf;
1592
1593 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1594
1595 /* start timer if queue currently empty */
1596 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1597 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1598
1599 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1600 if (ret < 0) {
1601 idx = ret;
1602 goto out;
1603 }
1604
1605 if (cmd->flags & CMD_BLOCK_TXQS)
1606 iwl_trans_pcie_block_txq_ptrs(trans, true);
1607
1608 /* Increment and update queue's write index */
1609 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1610 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1611
1612 out:
1613 spin_unlock_irqrestore(&txq->lock, flags);
1614 free_dup_buf:
1615 if (idx < 0)
1616 kfree(dup_buf);
1617 return idx;
1618 }
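
/*
 * Example of the command sequence field built in iwl_pcie_enqueue_hcmd()
 * above: the queue id is placed in bits 8..12 and the write index in the low
 * byte (QUEUE_TO_SEQ()/INDEX_TO_SEQ()).  For a hypothetical command queue 9
 * and write_ptr 5 the sequence is 0x0905; iwl_pcie_hcmd_complete() below
 * recovers both values with SEQ_TO_QUEUE()/SEQ_TO_INDEX().
 */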
1619
1620 /*
1621 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1622 * @rxb: Rx buffer to reclaim
1623 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1625 struct iwl_rx_cmd_buffer *rxb)
1626 {
1627 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1628 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1629 u8 group_id;
1630 u32 cmd_id;
1631 int txq_id = SEQ_TO_QUEUE(sequence);
1632 int index = SEQ_TO_INDEX(sequence);
1633 int cmd_index;
1634 struct iwl_device_cmd *cmd;
1635 struct iwl_cmd_meta *meta;
1636 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1637 struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
1638
	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
1642 if (IWL_FW_CHECK(trans, txq_id != trans->conf.cmd_queue,
1643 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d pkt=%*phN\n",
1644 txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
1645 txq->write_ptr, 32, pkt))
1646 return;
1647
1648 spin_lock_bh(&txq->lock);
1649
1650 cmd_index = iwl_txq_get_cmd_index(txq, index);
1651 cmd = txq->entries[cmd_index].cmd;
1652 meta = &txq->entries[cmd_index].meta;
1653 group_id = cmd->hdr.group_id;
1654 cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1655
1656 if (trans->mac_cfg->gen2)
1657 iwl_txq_gen2_tfd_unmap(trans, meta,
1658 iwl_txq_get_tfd(trans, txq, index));
1659 else
1660 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1661
1662 /* Input error checking is done when commands are added to queue. */
1663 if (meta->flags & CMD_WANT_SKB) {
1664 struct page *p = rxb_steal_page(rxb);
1665
1666 meta->source->resp_pkt = pkt;
1667 meta->source->_rx_page_addr = (unsigned long)page_address(p);
1668 meta->source->_rx_page_order = trans_pcie->rx_page_order;
1669 }
1670
1671 if (meta->flags & CMD_BLOCK_TXQS)
1672 iwl_trans_pcie_block_txq_ptrs(trans, false);
1673
1674 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1675
1676 if (!(meta->flags & CMD_ASYNC)) {
1677 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1678 IWL_WARN(trans,
1679 "HCMD_ACTIVE already clear for command %s\n",
1680 iwl_get_cmd_string(trans, cmd_id));
1681 }
1682 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1683 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1684 iwl_get_cmd_string(trans, cmd_id));
1685 wake_up(&trans_pcie->wait_command_queue);
1686 }
1687
1688 meta->flags = 0;
1689
1690 spin_unlock_bh(&txq->lock);
1691 }
1692
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1694 struct iwl_txq *txq, u8 hdr_len,
1695 struct iwl_cmd_meta *out_meta)
1696 {
1697 u16 head_tb_len;
1698 int i;
1699
1700 /*
1701 * Set up TFD's third entry to point directly to remainder
1702 * of skb's head, if any
1703 */
1704 head_tb_len = skb_headlen(skb) - hdr_len;
1705
1706 if (head_tb_len > 0) {
1707 dma_addr_t tb_phys = dma_map_single(trans->dev,
1708 skb->data + hdr_len,
1709 head_tb_len, DMA_TO_DEVICE);
1710 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1711 return -EINVAL;
1712 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1713 tb_phys, head_tb_len);
1714 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
1715 }
1716
1717 /* set up the remaining entries to point to the data */
1718 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1719 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1720 dma_addr_t tb_phys;
1721 int tb_idx;
1722
1723 if (!skb_frag_size(frag))
1724 continue;
1725
1726 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1727 skb_frag_size(frag), DMA_TO_DEVICE);
1728
1729 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1730 return -EINVAL;
1731 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1732 tb_phys, skb_frag_size(frag));
1733 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1734 skb_frag_size(frag), false);
1735 if (tb_idx < 0)
1736 return tb_idx;
1737
1738 out_meta->tbs |= BIT(tb_idx);
1739 }
1740
1741 return 0;
1742 }
1743
1744 #ifdef CONFIG_INET
static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
1746 size_t len, struct sk_buff *skb)
1747 {
1748 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1749 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
1750 struct iwl_tso_page_info *info;
1751 struct page **page_ptr;
1752 dma_addr_t phys;
1753 void *ret;
1754
1755 page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
1756
1757 if (WARN_ON(*page_ptr))
1758 return NULL;
1759
1760 if (!p->page)
1761 goto alloc;
1762
1763 /*
1764 * Check if there's enough room on this page
1765 *
1766 * Note that we put a page chaining pointer *last* in the
1767 * page - we need it somewhere, and if it's there then we
1768 * avoid DMA mapping the last bits of the page which may
1769 * trigger the 32-bit boundary hardware bug.
1770 *
1771 * (see also get_workaround_page() in tx-gen2.c)
1772 */
1773 if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
1774 info = IWL_TSO_PAGE_INFO(page_address(p->page));
1775 goto out;
1776 }
1777
1778 /* We don't have enough room on this page, get a new one. */
1779 iwl_pcie_free_and_unmap_tso_page(trans, p->page);
1780
1781 alloc:
1782 p->page = alloc_page(GFP_ATOMIC);
1783 if (!p->page)
1784 return NULL;
1785 p->pos = page_address(p->page);
1786
1787 info = IWL_TSO_PAGE_INFO(page_address(p->page));
1788
1789 /* set the chaining pointer to NULL */
1790 info->next = NULL;
1791
1792 /* Create a DMA mapping for the page */
1793 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
1794 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1795 if (unlikely(dma_mapping_error(trans->dev, phys))) {
1796 __free_page(p->page);
1797 p->page = NULL;
1798
1799 return NULL;
1800 }
1801
1802 /* Store physical address and set use count */
1803 info->dma_addr = phys;
1804 refcount_set(&info->use_count, 1);
1805 out:
1806 *page_ptr = p->page;
1807 /* Return an internal reference for the caller */
1808 refcount_inc(&info->use_count);
1809 ret = p->pos;
1810 p->pos += len;
1811
1812 return ret;
1813 }
1814
1815 /**
1816 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
1817 * @sgt: scatter gather table
1818 * @offset: Offset into the mapped memory (i.e. SKB payload data)
1819 * @len: Length of the area
1820 *
1821 * Find the DMA address that corresponds to the SKB payload data at the
1822 * position given by @offset.
1823 *
1824 * Returns: Address for TB entry
1825 */
1826 dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
1827 unsigned int len)
1828 {
1829 struct scatterlist *sg;
1830 unsigned int sg_offset = 0;
1831 int i;
1832
1833 /*
1834 * Search the mapped DMA areas in the SG for the area that contains the
1835 * data at offset with the given length.
1836 */
1837 for_each_sgtable_dma_sg(sgt, sg, i) {
1838 if (offset >= sg_offset &&
1839 offset + len <= sg_offset + sg_dma_len(sg))
1840 return sg_dma_address(sg) + offset - sg_offset;
1841
1842 sg_offset += sg_dma_len(sg);
1843 }
1844
1845 WARN_ON_ONCE(1);
1846
1847 return DMA_MAPPING_ERROR;
1848 }
1849
1850 /**
1851 * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
1852 * @trans: transport private data
1853 * @skb: the SKB to map
1854 * @cmd_meta: command meta to store the scatter list information for unmapping
1855 * @hdr: output argument for TSO headers
1856 * @hdr_room: requested length for TSO headers
1857 * @offset: offset into the data from which mapping should start
1858 *
1859 * Allocate space for a scatter gather list and TSO headers and map the SKB
1860 * using the scatter gather list. The SKB is unmapped again when the page is
1861 * freed at the end of the operation.
1862 *
1863 * Returns: newly allocated and mapped scatter gather table with its list, or NULL on failure
1864 */
1865 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
1866 struct iwl_cmd_meta *cmd_meta,
1867 u8 **hdr, unsigned int hdr_room,
1868 unsigned int offset)
1869 {
1870 struct sg_table *sgt;
1871 unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
1872 int orig_nents;
1873
1874 if (WARN_ON_ONCE(skb_has_frag_list(skb)))
1875 return NULL;
1876
1877 *hdr = iwl_pcie_get_page_hdr(trans,
1878 hdr_room + __alignof__(struct sg_table) +
1879 sizeof(struct sg_table) +
1880 n_segments * sizeof(struct scatterlist),
1881 skb);
1882 if (!*hdr)
1883 return NULL;
1884
1885 sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
1886 sgt->sgl = (void *)(sgt + 1);
1887
1888 sg_init_table(sgt->sgl, n_segments);
1889
1890 /* Only map the data, not the header (it is copied to the TSO page) */
1891 orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
1892 if (WARN_ON_ONCE(orig_nents <= 0))
1893 return NULL;
1894
1895 sgt->orig_nents = orig_nents;
1896
1897 /* And map the entire SKB */
1898 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
1899 return NULL;
1900
1901 /* Store non-zero (i.e. valid) offset for unmapping */
1902 cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
1903
1904 return sgt;
1905 }
1906
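/*
 * iwl_fill_data_tbs_amsdu - build TBs for a software A-MSDU (TSO)
 *
 * Splits the SKB into MSS-sized subframes. For each subframe, a small header
 * (padding, DA/SA, length, and the SNAP/IP/TCP header built by the TSO core)
 * is written into the TSO header page and added as its own TB, followed by
 * TBs pointing into the payload through the scatter-gather mapping that
 * iwl_pcie_prep_tso() set up.
 */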
1907 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1908 struct iwl_txq *txq, u8 hdr_len,
1909 struct iwl_cmd_meta *out_meta,
1910 struct iwl_device_tx_cmd *dev_cmd,
1911 u16 tb1_len)
1912 {
1913 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1914 struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
1915 struct ieee80211_hdr *hdr = (void *)skb->data;
1916 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
1917 unsigned int mss = skb_shinfo(skb)->gso_size;
1918 unsigned int data_offset = 0;
1919 u16 length, iv_len, amsdu_pad;
1920 dma_addr_t start_hdr_phys;
1921 u8 *start_hdr, *pos_hdr;
1922 struct sg_table *sgt;
1923 struct tso_t tso;
1924
1925 /* if the packet is protected, then it must be CCMP or GCMP */
1926 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
1927 iv_len = ieee80211_has_protected(hdr->frame_control) ?
1928 IEEE80211_CCMP_HDR_LEN : 0;
1929
1930 trace_iwlwifi_dev_tx(trans->dev, skb,
1931 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1932 trans_pcie->txqs.tfd.size,
1933 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
1934
1935 ip_hdrlen = skb_network_header_len(skb);
1936 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
1937 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
1938 amsdu_pad = 0;
1939
1940 /* total amount of header we may need for this A-MSDU */
1941 hdr_room = DIV_ROUND_UP(total_len, mss) *
1942 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
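/*
 * For example, for an IPv4/TCP frame with no IP or TCP options
 * (snap_ip_tcp_hdrlen = 8 + 20 + 20 = 48), total_len = 4000 and
 * mss = 1400, this gives 3 subframes of at most 3 (pad) + 48 +
 * 14 (ethhdr) = 65 header bytes each, i.e. 195 bytes, plus 8 bytes
 * for the IV if the frame is CCMP/GCMP protected.
 */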
1943
1944 /* Our device supports 9 segments at most; they will fit in one page */
1945 sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
1946 snap_ip_tcp_hdrlen + hdr_len + iv_len);
1947 if (!sgt)
1948 return -ENOMEM;
1949
1950 start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
1951 pos_hdr = start_hdr;
1952 memcpy(pos_hdr, skb->data + hdr_len, iv_len);
1953 pos_hdr += iv_len;
1954
1955 /*
1956 * Pull the ieee80211 header + IV to be able to use the TSO core;
1957 * we will restore it for the tx_status flow.
1958 */
1959 skb_pull(skb, hdr_len + iv_len);
1960
1961 /*
1962 * Remove the length of all the headers that we don't actually
1963 * have in the MPDU by themselves, but that we duplicate into
1964 * all the different MSDUs inside the A-MSDU.
1965 */
1966 le16_add_cpu(&tx_cmd->params.len, -snap_ip_tcp_hdrlen);
1967
1968 tso_start(skb, &tso);
1969
1970 while (total_len) {
1971 /* this is the data left for this subframe */
1972 unsigned int data_left =
1973 min_t(unsigned int, mss, total_len);
1974 unsigned int hdr_tb_len;
1975 dma_addr_t hdr_tb_phys;
1976 u8 *subf_hdrs_start = pos_hdr;
1977
1978 total_len -= data_left;
1979
1980 memset(pos_hdr, 0, amsdu_pad);
1981 pos_hdr += amsdu_pad;
1982 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
1983 data_left)) & 0x3;
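/*
 * amsdu_pad is the padding needed in front of the *next* subframe so
 * that every subframe starts on a 4-byte boundary; e.g. with
 * snap_ip_tcp_hdrlen = 48 and data_left = 1400, the subframe is
 * 14 + 48 + 1400 = 1462 bytes, so 2 bytes of padding follow it.
 */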
1984 ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
1985 pos_hdr += ETH_ALEN;
1986 ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
1987 pos_hdr += ETH_ALEN;
1988
1989 length = snap_ip_tcp_hdrlen + data_left;
1990 *((__be16 *)pos_hdr) = cpu_to_be16(length);
1991 pos_hdr += sizeof(length);
1992
1993 /*
1994 * This will copy the SNAP as well, which will be considered
1995 * part of the MAC header.
1996 */
1997 tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
1998
1999 pos_hdr += snap_ip_tcp_hdrlen;
2000
2001 hdr_tb_len = pos_hdr - start_hdr;
2002 hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
2003
2004 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
2005 hdr_tb_len, false);
2006 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
2007 hdr_tb_phys, hdr_tb_len);
2008 /* add this subframe's headers' length to the tx_cmd */
2009 le16_add_cpu(&tx_cmd->params.len, pos_hdr - subf_hdrs_start);
2010
2011 /* prepare the start_hdr for the next subframe */
2012 start_hdr = pos_hdr;
2013
2014 /* put the payload */
2015 while (data_left) {
2016 unsigned int size = min_t(unsigned int, tso.size,
2017 data_left);
2018 dma_addr_t tb_phys;
2019
2020 tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
2021 /* Not a real mapping error, use direct comparison */
2022 if (unlikely(tb_phys == DMA_MAPPING_ERROR))
2023 return -EINVAL;
2024
2025 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2026 size, false);
2027 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
2028 tb_phys, size);
2029
2030 data_left -= size;
2031 data_offset += size;
2032 tso_build_data(skb, &tso, size);
2033 }
2034 }
2035
2036 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
2037 DMA_TO_DEVICE);
2038
2039 /* re-add the WiFi header and IV */
2040 skb_push(skb, hdr_len + iv_len);
2041
2042 return 0;
2043 }
2044 #else /* CONFIG_INET */
2045 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2046 struct iwl_txq *txq, u8 hdr_len,
2047 struct iwl_cmd_meta *out_meta,
2048 struct iwl_device_tx_cmd *dev_cmd,
2049 u16 tb1_len)
2050 {
2051 /* No A-MSDU without CONFIG_INET */
2052 WARN_ON(1);
2053
2054 return -1;
2055 }
2056 #endif /* CONFIG_INET */
2057
2058 #define IWL_TX_CRC_SIZE 4
2059 #define IWL_TX_DELIMITER_SIZE 4
2060
2061 /*
2062 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
2063 */
2064 static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
2065 struct iwl_txq *txq, u16 byte_cnt,
2066 int num_tbs)
2067 {
2068 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2069 struct iwl_bc_tbl_entry *scd_bc_tbl;
2070 int write_ptr = txq->write_ptr;
2071 int txq_id = txq->id;
2072 u8 sec_ctl = 0;
2073 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2074 __le16 bc_ent;
2075 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
2076 struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
2077 u8 sta_id = tx_cmd->params.sta_id;
2078
2079 scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
2080
2081 sec_ctl = tx_cmd->params.sec_ctl;
2082
2083 switch (sec_ctl & TX_CMD_SEC_MSK) {
2084 case TX_CMD_SEC_CCM:
2085 len += IEEE80211_CCMP_MIC_LEN;
2086 break;
2087 case TX_CMD_SEC_TKIP:
2088 len += IEEE80211_TKIP_ICV_LEN;
2089 break;
2090 case TX_CMD_SEC_WEP:
2091 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
2092 break;
2093 }
2094
2095 if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
2096 len = DIV_ROUND_UP(len, 4);
2097
2098 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
2099 return;
2100
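/*
 * The entry packs the length (in dwords on pre-AX210 devices, after the
 * DIV_ROUND_UP above) into the low 12 bits, with the station id in bits
 * 12-15. For example, byte_cnt = 1200 with CCM gives 1200 + 8 (MIC) +
 * 4 (CRC) + 4 (delimiter) = 1216 bytes = 304 dwords, so with sta_id = 5
 * the entry is 0x5130.
 */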
2101 bc_ent = cpu_to_le16(len | (sta_id << 12));
2102
2103 scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + write_ptr].tfd_offset = bc_ent;
2104
2105 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
2106 scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
2107 bc_ent;
2108 }
2109
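/*
 * iwl_trans_pcie_tx - add a frame to a gen1 TX queue
 *
 * TB0 holds the first IWL_FIRST_TB_SIZE bytes of the TX command and lives in
 * the queue's first_tb_bufs array; TB1 covers the rest of the TX command plus
 * the 802.11 header; the remaining TBs point at the payload, either directly
 * or as A-MSDU subframes. The byte-count table entry is updated and the write
 * pointer advanced (and handed to the device, unless more fragments follow).
 */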
2110 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2111 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
2112 {
2113 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2114 struct ieee80211_hdr *hdr;
2115 struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
2116 struct iwl_cmd_meta *out_meta;
2117 struct iwl_txq *txq;
2118 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2119 void *tb1_addr;
2120 void *tfd;
2121 u16 len, tb1_len;
2122 bool wait_write_ptr;
2123 __le16 fc;
2124 u8 hdr_len;
2125 u16 wifi_seq;
2126 bool amsdu;
2127
2128 txq = trans_pcie->txqs.txq[txq_id];
2129
2130 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
2131 "TX on unused queue %d\n", txq_id))
2132 return -EINVAL;
2133
2134 if (skb_is_nonlinear(skb) &&
2135 skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
2136 __skb_linearize(skb))
2137 return -ENOMEM;
2138
2139 /* mac80211 always puts the full header into the SKB's head,
2140 * so there's no need to check if it's readable there
2141 */
2142 hdr = (struct ieee80211_hdr *)skb->data;
2143 fc = hdr->frame_control;
2144 hdr_len = ieee80211_hdrlen(fc);
2145
2146 spin_lock(&txq->lock);
2147
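/*
 * If space is running low, stop the queue. If it is actually too full
 * to take this frame, park the SKB (with its TX command pointer saved
 * in the SKB's cb) on the overflow queue; the reclaim path will send it
 * once space frees up again.
 */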
2148 if (iwl_txq_space(trans, txq) < txq->high_mark) {
2149 iwl_txq_stop(trans, txq);
2150
2151 /* don't put the packet on the ring, if there is no room */
2152 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
2153 struct iwl_device_tx_cmd **dev_cmd_ptr;
2154
2155 dev_cmd_ptr = (void *)((u8 *)skb->cb +
2156 trans->conf.cb_data_offs +
2157 sizeof(void *));
2158
2159 *dev_cmd_ptr = dev_cmd;
2160 __skb_queue_tail(&txq->overflow_q, skb);
2161
2162 spin_unlock(&txq->lock);
2163 return 0;
2164 }
2165 }
2166
2167 /* In AGG mode, the index in the ring must correspond to the WiFi
2168 * sequence number. This is a HW requirement to help the SCD parse
2169 * the BA.
2170 * Check here that the packets are in the right place on the ring.
2171 */
2172 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2173 WARN_ONCE(txq->ampdu &&
2174 (wifi_seq & 0xff) != txq->write_ptr,
2175 "Q: %d WiFi Seq %d tfdNum %d",
2176 txq_id, wifi_seq, txq->write_ptr);
2177
2178 /* Set up driver data for this TFD */
2179 txq->entries[txq->write_ptr].skb = skb;
2180 txq->entries[txq->write_ptr].cmd = dev_cmd;
2181
2182 dev_cmd->hdr.sequence =
2183 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2184 INDEX_TO_SEQ(txq->write_ptr)));
2185
2186 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
2187 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2188 offsetof(struct iwl_tx_cmd_v6_params, scratch);
2189
2190 tx_cmd->params.dram_lsb_ptr = cpu_to_le32(scratch_phys);
2191 tx_cmd->params.dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2192
2193 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2194 out_meta = &txq->entries[txq->write_ptr].meta;
2195 memset(out_meta, 0, sizeof(*out_meta));
2196
2197 /*
2198 * The second TB (tb1) points to the remainder of the TX command
2199 * and the 802.11 header - dword aligned size
2200 * (This calculation modifies the TX command, so do it before the
2201 * setup of the first TB)
2202 */
2203 len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
2204 hdr_len - IWL_FIRST_TB_SIZE;
2205 /* do not align A-MSDU to dword as the subframe header aligns it */
2206 amsdu = ieee80211_is_data_qos(fc) &&
2207 (*ieee80211_get_qos_ctl(hdr) &
2208 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2209 if (!amsdu) {
2210 tb1_len = ALIGN(len, 4);
2211 /* Tell NIC about any 2-byte padding after MAC header */
2212 if (tb1_len != len)
2213 tx_cmd->params.tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
2214 } else {
2215 tb1_len = len;
2216 }
2217
2218 /*
2219 * The first TB points to bi-directional DMA data; we'll
2220 * memcpy the data into it later.
2221 */
2222 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
2223 IWL_FIRST_TB_SIZE, true);
2224
2225 /* there must be data left over for TB1 or this code must be changed */
2226 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
2227 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
2228 offsetofend(struct iwl_tx_cmd_v6_params, scratch) >
2229 IWL_FIRST_TB_SIZE);
2230
2231 /* map the data for TB1 */
2232 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
2233 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2234 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2235 goto out_err;
2236 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2237
2238 trace_iwlwifi_dev_tx(trans->dev, skb,
2239 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
2240 trans_pcie->txqs.tfd.size,
2241 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2242 hdr_len);
2243
2244 /*
2245 * If gso_size wasn't set, don't give the frame "amsdu treatment"
2246 * (adding subframes, etc.).
2247 * This can happen in some testing flows when the amsdu was already
2248 * pre-built, and we just need to send the resulting skb.
2249 */
2250 if (amsdu && skb_shinfo(skb)->gso_size) {
2251 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2252 out_meta, dev_cmd,
2253 tb1_len)))
2254 goto out_err;
2255 } else {
2256 struct sk_buff *frag;
2257
2258 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2259 out_meta)))
2260 goto out_err;
2261
2262 skb_walk_frags(skb, frag) {
2263 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
2264 out_meta)))
2265 goto out_err;
2266 }
2267 }
2268
2269 /* building the A-MSDU might have changed this data, so memcpy it now */
2270 memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
2271
2272 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
2273 /* Set up entry for this TFD in Tx byte-count array */
2274 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->params.len),
2275 iwl_txq_gen1_tfd_get_num_tbs(tfd));
2276
2277 wait_write_ptr = ieee80211_has_morefrags(fc);
2278
2279 /* start timer if queue currently empty */
2280 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
2281 /*
2282 * If the TXQ is active, set the timer; if not, save
2283 * the timeout as the remainder so that the timer will
2284 * be armed with the right value when the station
2285 * wakes up.
2286 */
2287 if (!txq->frozen)
2288 mod_timer(&txq->stuck_timer,
2289 jiffies + txq->wd_timeout);
2290 else
2291 txq->frozen_expiry_remainder = txq->wd_timeout;
2292 }
2293
2294 /* Tell device the write index *just past* this latest filled TFD */
2295 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
2296 if (!wait_write_ptr)
2297 iwl_pcie_txq_inc_wr_ptr(trans, txq);
2298
2299 /*
2300 * At this point the frame is "transmitted" successfully
2301 * and we will get a TX status notification eventually.
2302 */
2303 spin_unlock(&txq->lock);
2304 return 0;
2305 out_err:
2306 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2307 spin_unlock(&txq->lock);
2308 return -1;
2309 }
2310
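/*
 * iwl_txq_gen1_inval_byte_cnt_tbl - clear a byte-count table entry
 *
 * Counterpart of iwl_txq_gen1_update_byte_cnt_tbl(): when a TFD is reclaimed,
 * its byte-count entry (and the duplicate entry at the end of the table, if
 * applicable) is reset to the minimal value of 1 for the frame's station.
 */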
2311 static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
2312 struct iwl_txq *txq,
2313 int read_ptr)
2314 {
2315 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2316 struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
2317 int txq_id = txq->id;
2318 u8 sta_id = 0;
2319 __le16 bc_ent;
2320 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
2321 struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
2322
2323 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
2324
2325 if (txq_id != trans->conf.cmd_queue)
2326 sta_id = tx_cmd->params.sta_id;
2327
2328 bc_ent = cpu_to_le16(1 | (sta_id << 12));
2329
2330 scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + read_ptr].tfd_offset = bc_ent;
2331
2332 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
2333 scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
2334 bc_ent;
2335 }
2336
2337 /* Frees buffers until index _not_ inclusive */
2338 void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
2339 struct sk_buff_head *skbs, bool is_flush)
2340 {
2341 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2342 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
2343 int tfd_num, read_ptr, last_to_free;
2344 int txq_read_ptr, txq_write_ptr;
2345
2346 /* This function is not meant to release the cmd queue */
2347 if (WARN_ON(txq_id == trans->conf.cmd_queue))
2348 return;
2349
2350 if (WARN_ON(!txq))
2351 return;
2352
2353 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
2354
2355 spin_lock_bh(&txq->reclaim_lock);
2356
2357 spin_lock(&txq->lock);
2358 txq_read_ptr = txq->read_ptr;
2359 txq_write_ptr = txq->write_ptr;
2360 spin_unlock(&txq->lock);
2361
2362 /* There is nothing to do if we are flushing an empty queue */
2363 if (is_flush && txq_write_ptr == txq_read_ptr)
2364 goto out;
2365
2366 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
2367
2368 if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
2369 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
2370 txq_id, ssn);
2371 goto out;
2372 }
2373
2374 if (read_ptr == tfd_num)
2375 goto out;
2376
2377 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
2378 txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
2379
2380 /* Since we free until the index, _not_ inclusive, the one before it is
2381 * the last we will free. That entry must be in use.
2382 */
2383 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
2384
2385 if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
2386 IWL_ERR(trans,
2387 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
2388 __func__, txq_id, last_to_free,
2389 trans->mac_cfg->base->max_tfd_queue_size,
2390 txq_write_ptr, txq_read_ptr);
2391
2392 iwl_op_mode_time_point(trans->op_mode,
2393 IWL_FW_INI_TIME_POINT_FAKE_TX,
2394 NULL);
2395 goto out;
2396 }
2397
2398 if (WARN_ON(!skb_queue_empty(skbs)))
2399 goto out;
2400
2401 for (;
2402 read_ptr != tfd_num;
2403 txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
2404 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
2405 struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
2406 struct sk_buff *skb = txq->entries[read_ptr].skb;
2407
2408 if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
2409 read_ptr, txq_read_ptr, txq_id))
2410 continue;
2411
2412 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
2413
2414 __skb_queue_tail(skbs, skb);
2415
2416 txq->entries[read_ptr].skb = NULL;
2417
2418 if (!trans->mac_cfg->gen2)
2419 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
2420 txq_read_ptr);
2421
2422 iwl_txq_free_tfd(trans, txq, txq_read_ptr);
2423 }
2424
2425 spin_lock(&txq->lock);
2426 txq->read_ptr = txq_read_ptr;
2427
2428 iwl_txq_progress(txq);
2429
2430 if (iwl_txq_space(trans, txq) > txq->low_mark &&
2431 test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
2432 struct sk_buff_head overflow_skbs;
2433 struct sk_buff *skb;
2434
2435 __skb_queue_head_init(&overflow_skbs);
2436 skb_queue_splice_init(&txq->overflow_q,
2437 is_flush ? skbs : &overflow_skbs);
2438
2439 /*
2440 * We are going to transmit from the overflow queue.
2441 * Remember this state so that wait_for_txq_empty will know we
2442 * are adding more packets to the TFD queue. It cannot rely on
2443 * the state of &txq->overflow_q, as we just emptied it, but
2444 * haven't TXed the content yet.
2445 */
2446 txq->overflow_tx = true;
2447
2448 /*
2449 * This is tricky: we are in the reclaim path and are holding
2450 * reclaim_lock, so no one will try to access the txq data
2451 * from that path. We stopped tx, so tx cannot race with us either.
2452 * Bottom line: we can unlock and re-lock later.
2453 */
2454 spin_unlock(&txq->lock);
2455
2456 while ((skb = __skb_dequeue(&overflow_skbs))) {
2457 struct iwl_device_tx_cmd *dev_cmd_ptr;
2458
2459 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
2460 trans->conf.cb_data_offs +
2461 sizeof(void *));
2462
2463 /*
2464 * Note that we can very well be overflowing again.
2465 * In that case, iwl_txq_space will be small again
2466 * and we won't wake mac80211's queue.
2467 */
2468 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
2469 }
2470
2471 if (iwl_txq_space(trans, txq) > txq->low_mark)
2472 iwl_trans_pcie_wake_queue(trans, txq);
2473
2474 spin_lock(&txq->lock);
2475 txq->overflow_tx = false;
2476 }
2477
2478 spin_unlock(&txq->lock);
2479 out:
2480 spin_unlock_bh(&txq->reclaim_lock);
2481 }
2482
2483 /* Set the write (and read) pointer of a specific txq */
2484 void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
2485 {
2486 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2487 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
2488
2489 spin_lock_bh(&txq->lock);
2490
2491 txq->write_ptr = ptr;
2492 txq->read_ptr = txq->write_ptr;
2493
2494 spin_unlock_bh(&txq->lock);
2495 }
2496
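/*
 * iwl_pcie_freeze_txq_timer - freeze/unfreeze the stuck-queue timer
 *
 * On freeze, the remaining time of a pending stuck_timer is saved and the
 * timer deleted; on unfreeze, the timer is re-armed with that remainder, so
 * the queue keeps the same effective timeout across the frozen period.
 */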
2497 void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
2498 unsigned long txqs, bool freeze)
2499 {
2500 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2501 int queue;
2502
2503 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
2504 struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
2505 unsigned long now;
2506
2507 spin_lock_bh(&txq->lock);
2508
2509 now = jiffies;
2510
2511 if (txq->frozen == freeze)
2512 goto next_queue;
2513
2514 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
2515 freeze ? "Freezing" : "Waking", queue);
2516
2517 txq->frozen = freeze;
2518
2519 if (txq->read_ptr == txq->write_ptr)
2520 goto next_queue;
2521
2522 if (freeze) {
2523 if (unlikely(time_after(now,
2524 txq->stuck_timer.expires))) {
2525 /*
2526 * The timer should have fired, maybe it is
2527 * spinning right now on the lock.
2528 */
2529 goto next_queue;
2530 }
2531 /* remember how long until the timer fires */
2532 txq->frozen_expiry_remainder =
2533 txq->stuck_timer.expires - now;
2534 timer_delete(&txq->stuck_timer);
2535 goto next_queue;
2536 }
2537
2538 /*
2539 * Wake a non-empty queue -> arm timer with the
2540 * remainder before it froze
2541 */
2542 mod_timer(&txq->stuck_timer,
2543 now + txq->frozen_expiry_remainder);
2544
2545 next_queue:
2546 spin_unlock_bh(&txq->lock);
2547 }
2548 }
2549
2550 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
2551
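/*
 * iwl_trans_pcie_send_hcmd_sync - send a host command and wait for completion
 *
 * Sets STATUS_SYNC_HCMD_ACTIVE, enqueues the command and sleeps on
 * wait_command_queue until the completion path above clears the bit, or
 * until HOST_COMPLETE_TIMEOUT expires, in which case iwl_trans_pcie_sync_nmi()
 * is called and the command is cancelled.
 */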
2552 static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
2553 struct iwl_host_cmd *cmd,
2554 const char *cmd_str)
2555 {
2556 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2557 struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
2558 int cmd_idx;
2559 int ret;
2560
2561 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
2562
2563 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
2564 &trans->status),
2565 "Command %s: a command is already active!\n", cmd_str))
2566 return -EIO;
2567
2568 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
2569
2570 if (trans->mac_cfg->gen2)
2571 cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
2572 else
2573 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
2574
2575 if (cmd_idx < 0) {
2576 ret = cmd_idx;
2577 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2578 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
2579 cmd_str, ret);
2580 return ret;
2581 }
2582
2583 ret = wait_event_timeout(trans_pcie->wait_command_queue,
2584 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
2585 &trans->status),
2586 HOST_COMPLETE_TIMEOUT);
2587 if (!ret) {
2588 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
2589 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
2590
2591 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
2592 txq->read_ptr, txq->write_ptr);
2593
2594 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2595 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
2596 cmd_str);
2597 ret = -ETIMEDOUT;
2598
2599 iwl_trans_pcie_sync_nmi(trans);
2600 goto cancel;
2601 }
2602
2603 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
2604 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
2605 &trans->status)) {
2606 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
2607 dump_stack();
2608 }
2609 ret = -EIO;
2610 goto cancel;
2611 }
2612
2613 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
2614 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2615 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
2616 ret = -ERFKILL;
2617 goto cancel;
2618 }
2619
2620 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
2621 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
2622 ret = -EIO;
2623 goto cancel;
2624 }
2625
2626 return 0;
2627
2628 cancel:
2629 if (cmd->flags & CMD_WANT_SKB) {
2630 /*
2631 * Cancel the CMD_WANT_SKB flag for the cmd in the
2632 * TX cmd queue. Otherwise, if the response comes
2633 * in later, it could set an invalid
2634 * address (cmd->meta.source).
2635 */
2636 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
2637 }
2638
2639 if (cmd->resp_pkt) {
2640 iwl_free_resp(cmd);
2641 cmd->resp_pkt = NULL;
2642 }
2643
2644 return ret;
2645 }
2646
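/*
 * iwl_trans_pcie_send_hcmd - host command entry point
 *
 * Drops the command if the device is dead, or if RF-kill is asserted and
 * CMD_SEND_IN_RFKILL is not set. CMD_ASYNC commands are only enqueued and
 * may not request a response buffer (CMD_WANT_SKB); all other commands go
 * through the synchronous path above.
 */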
2647 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
2648 struct iwl_host_cmd *cmd)
2649 {
2650 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
2651
2652 /* Make sure the NIC is still alive in the bus */
2653 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2654 return -ENODEV;
2655
2656 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
2657 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2658 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
2659 cmd->id);
2660 return -ERFKILL;
2661 }
2662
2663 if (cmd->flags & CMD_ASYNC) {
2664 int ret;
2665
2666 IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);
2667
2668 /* An asynchronous command cannot expect an SKB to be set. */
2669 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
2670 return -EINVAL;
2671
2672 if (trans->mac_cfg->gen2)
2673 ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
2674 else
2675 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
2676
2677 if (ret < 0) {
2678 IWL_ERR(trans,
2679 "Error sending %s: enqueue_hcmd failed: %d\n",
2680 iwl_get_cmd_string(trans, cmd->id), ret);
2681 return ret;
2682 }
2683 return 0;
2684 }
2685
2686 return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
2687 }
2688