3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 /* TODO: remove the include of iwl-core.h */
34 #include "iwl-core.h"
35 #include "iwl-io.h"
36 #include "iwl-trans-pcie-int.h"
48 * each of which points to Receive Buffers to be filled by the NIC. These get
57 * to -- the driver can read up to (but not including) this position and get
61 * The WRITE index maps to the last position the driver has read from -- the
64 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
68 * INDEX position, and WRITE to the last (READ - 1 wrapped)
76 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
77 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
78 * to replenish the iwl->rxq->rx_free.
80 * iwl->rxq is replenished and the READ INDEX is updated (updating the
83 * detached from the iwl->rxq. The driver 'processed' index is updated.
84 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
85 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
86 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
100 * -- enable interrupts --
101 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
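The empty/full rule above is easy to get backwards, so here it is as code. A minimal sketch (hypothetical helpers, not lines from this file), assuming RX_QUEUE_SIZE is a power of two and RX_QUEUE_MASK = RX_QUEUE_SIZE - 1:

static inline bool rxq_is_empty(u32 read, u32 write)
{
	/* WRITE trails READ by one slot: nothing valid to read */
	return write == ((read - 1) & RX_QUEUE_MASK);
}

static inline bool rxq_is_full(u32 read, u32 write)
{
	/* WRITE has caught up with READ: every slot is filled */
	return write == read;
}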
111 * iwl_rx_queue_space - Return number of free slots available in queue.
115 int s = q->read - q->write; in iwl_rx_queue_space()
119 s -= 2; in iwl_rx_queue_space()
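Only the subtraction and the final adjustment of iwl_rx_queue_space() survive in this listing. A plausible reconstruction of the whole helper (a sketch; the elided lines may differ in detail) handles wrap-around and clamps at zero:

static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;	/* un-wrap the circular index */
	/* keep some slack so a full queue is never mistaken for empty */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}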
126 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
134 spin_lock_irqsave(&q->lock, flags); in iwl_rx_queue_update_write_ptr()
136 if (q->need_update == 0) in iwl_rx_queue_update_write_ptr()
142 q->write_actual = (q->write & ~0x7); in iwl_rx_queue_update_write_ptr()
143 iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual); in iwl_rx_queue_update_write_ptr()
145 /* If power-saving is in use, make sure device is awake */ in iwl_rx_queue_update_write_ptr()
146 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { in iwl_rx_queue_update_write_ptr()
158 q->write_actual = (q->write & ~0x7); in iwl_rx_queue_update_write_ptr()
160 q->write_actual); in iwl_rx_queue_update_write_ptr()
165 q->write_actual = (q->write & ~0x7); in iwl_rx_queue_update_write_ptr()
167 q->write_actual); in iwl_rx_queue_update_write_ptr()
170 q->need_update = 0; in iwl_rx_queue_update_write_ptr()
173 spin_unlock_irqrestore(&q->lock, flags); in iwl_rx_queue_update_write_ptr()
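The recurring "q->write & ~0x7" masks off the low three bits, i.e. rounds the write index down to a multiple of 8: the device is only told about Rx buffers in batches of eight. A quick illustration (hypothetical helper, not driver code):

static inline u32 rx_write_actual(u32 write)
{
	return write & ~0x7;	/* 13 -> 8, 8 -> 8, 7 -> 0 */
}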
177 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
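The conversion body itself is elided here; in this driver generation it amounts to dropping the low 8 bits of the DMA address, which is why iwlagn_rx_allocate() below insists that addresses are 256-byte aligned and fit in 36 bits. A sketch of the expected shape:

static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	/* the RBD stores bits [35:8] of the buffer's DMA address */
	return cpu_to_le32((u32)(dma_addr >> 8));
}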
185 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
188 * and we have free pre-allocated buffers, fill the ranks as much
200 struct iwl_rx_queue *rxq = &trans_pcie->rxq; in iwlagn_rx_queue_restock()
205 spin_lock_irqsave(&rxq->lock, flags); in iwlagn_rx_queue_restock()
206 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { in iwlagn_rx_queue_restock()
208 rxb = rxq->queue[rxq->write]; in iwlagn_rx_queue_restock()
209 BUG_ON(rxb && rxb->page); in iwlagn_rx_queue_restock()
212 element = rxq->rx_free.next; in iwlagn_rx_queue_restock()
217 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma); in iwlagn_rx_queue_restock()
218 rxq->queue[rxq->write] = rxb; in iwlagn_rx_queue_restock()
219 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; in iwlagn_rx_queue_restock()
220 rxq->free_count--; in iwlagn_rx_queue_restock()
222 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_queue_restock()
223 /* If the pre-allocated buffer pool is dropping low, schedule to in iwlagn_rx_queue_restock()
225 if (rxq->free_count <= RX_LOW_WATERMARK) in iwlagn_rx_queue_restock()
226 queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish); in iwlagn_rx_queue_restock()
231 if (rxq->write_actual != (rxq->write & ~0x7)) { in iwlagn_rx_queue_restock()
232 spin_lock_irqsave(&rxq->lock, flags); in iwlagn_rx_queue_restock()
233 rxq->need_update = 1; in iwlagn_rx_queue_restock()
234 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_queue_restock()
240 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
252 struct iwl_rx_queue *rxq = &trans_pcie->rxq; in iwlagn_rx_allocate()
260 spin_lock_irqsave(&rxq->lock, flags); in iwlagn_rx_allocate()
261 if (list_empty(&rxq->rx_used)) { in iwlagn_rx_allocate()
262 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_allocate()
265 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_allocate()
267 if (rxq->free_count > RX_LOW_WATERMARK) in iwlagn_rx_allocate()
282 if ((rxq->free_count <= RX_LOW_WATERMARK) && in iwlagn_rx_allocate()
288 rxq->free_count); in iwlagn_rx_allocate()
289 /* We don't reschedule replenish work here -- we will in iwlagn_rx_allocate()
295 spin_lock_irqsave(&rxq->lock, flags); in iwlagn_rx_allocate()
297 if (list_empty(&rxq->rx_used)) { in iwlagn_rx_allocate()
298 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_allocate()
302 element = rxq->rx_used.next; in iwlagn_rx_allocate()
306 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_allocate()
308 BUG_ON(rxb->page); in iwlagn_rx_allocate()
309 rxb->page = page; in iwlagn_rx_allocate()
311 rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0, in iwlagn_rx_allocate()
315 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); in iwlagn_rx_allocate()
317 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); in iwlagn_rx_allocate()
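These two BUG_ON()s are the other half of the iwlagn_dma_addr2rbd_ptr() contract: the address must fit in 36 bits and have its low 8 bits clear. For example:

/* page_dma = 0x123456700 passes both checks: it fits in 36 bits and
 * bits [7:0] are zero (256-byte aligned); the RBD pointer stored for
 * it would be page_dma >> 8 = 0x01234567. */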
319 spin_lock_irqsave(&rxq->lock, flags); in iwlagn_rx_allocate()
321 list_add_tail(&rxb->list, &rxq->rx_free); in iwlagn_rx_allocate()
322 rxq->free_count++; in iwlagn_rx_allocate()
324 spin_unlock_irqrestore(&rxq->lock, flags); in iwlagn_rx_allocate()
334 spin_lock_irqsave(&trans->shrd->lock, flags); in iwlagn_rx_replenish()
336 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwlagn_rx_replenish()
350 struct iwl_trans *trans = trans_pcie->trans; in iwl_bg_rx_replenish()
352 if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status)) in iwl_bg_rx_replenish()
355 mutex_lock(&trans->shrd->mutex); in iwl_bg_rx_replenish()
357 mutex_unlock(&trans->shrd->mutex); in iwl_bg_rx_replenish()
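iwl_bg_rx_replenish() is the deferred half of the low-watermark scheme: iwlagn_rx_queue_restock() queues the work when free_count drops, and the work item replenishes under the mutex. It is presumably wired up once at init time along these lines (a sketch of the standard workqueue pattern, not lines from this file):

INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
/* later, from iwlagn_rx_queue_restock(): */
queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);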
361 * iwl_rx_handle - Main entry function for receiving responses from uCode
363 * Uses the priv->rx_handlers callback function array to invoke
365 * frame-received notifications, and other notifications.
373 struct iwl_rx_queue *rxq = &trans_pcie->rxq; in iwl_rx_handle()
374 struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue]; in iwl_rx_handle()
385 * buffer that the driver may process (last buffer filled by ucode). */ in iwl_rx_handle()
386 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; in iwl_rx_handle()
387 i = rxq->read; in iwl_rx_handle()
394 total_empty = r - rxq->write_actual; in iwl_rx_handle()
405 rxb = rxq->queue[i]; in iwl_rx_handle()
409 * routines -- catch it here */ in iwl_rx_handle()
415 rxq->queue[i] = NULL; in iwl_rx_handle()
417 dma_unmap_page(bus(trans)->dev, rxb->page_dma, in iwl_rx_handle()
423 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); in iwl_rx_handle()
425 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; in iwl_rx_handle()
430 * to a (driver-originated) command. in iwl_rx_handle()
433 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, in iwl_rx_handle()
435 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && in iwl_rx_handle()
436 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && in iwl_rx_handle()
437 (pkt->hdr.cmd != REPLY_RX) && in iwl_rx_handle()
438 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && in iwl_rx_handle()
439 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && in iwl_rx_handle()
440 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && in iwl_rx_handle()
441 (pkt->hdr.cmd != REPLY_TX); in iwl_rx_handle()
443 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_rx_handle()
445 cmd_index = get_cmd_index(&txq->q, index); in iwl_rx_handle()
448 cmd = txq->cmd[cmd_index]; in iwl_rx_handle()
454 * uCode-originated in iwl_rx_handle()
458 WARN(pkt->hdr.cmd != REPLY_TX && !reclaim && in iwl_rx_handle()
459 (!(pkt->hdr.sequence & SEQ_RX_FRAME)), in iwl_rx_handle()
461 get_cmd_string(pkt->hdr.cmd)); in iwl_rx_handle()
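A concrete pair of cases may help (illustrative, using command ids from this driver):

/* A REPLY_SCAN_CMD response is driver-originated (SEQ_RX_FRAME clear)
 * and not on the exclusion list above, so reclaim = true and its Tx
 * command slot is freed. A REPLY_RX_MPDU_CMD frame from the uCode
 * matches the exclusion list, so reclaim = false. */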
466 * XXX: After here, we should always check rxb->page in iwl_rx_handle()
477 if (rxb->page) in iwl_rx_handle()
486 spin_lock_irqsave(&rxq->lock, flags); in iwl_rx_handle()
487 if (rxb->page != NULL) { in iwl_rx_handle()
488 rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page, in iwl_rx_handle()
492 list_add_tail(&rxb->list, &rxq->rx_free); in iwl_rx_handle()
493 rxq->free_count++; in iwl_rx_handle()
495 list_add_tail(&rxb->list, &rxq->rx_used); in iwl_rx_handle()
497 spin_unlock_irqrestore(&rxq->lock, flags); in iwl_rx_handle()
505 rxq->read = i; in iwl_rx_handle()
513 rxq->read = i; in iwl_rx_handle()
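The lines elided just above implement a periodic restock: after a burst of processed buffers the handler publishes its progress and refills immediately so the uCode never starves. A sketch of that shape (reconstruction; iwlagn_rx_replenish_now() is defined in this file):

		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;	/* publish progress */
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}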
578 max = ARRAY_SIZE(advanced_lookup) - 1; in desc_lookup()
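desc_lookup() maps a uCode error id to a human-readable string; the surviving line suggests a linear scan of an advanced_lookup[] table whose last entry is the catch-all. A plausible reconstruction:

static const char *desc_lookup(u32 num)
{
	int i;
	int max = ARRAY_SIZE(advanced_lookup) - 1;

	for (i = 0; i < max; i++)
		if (advanced_lookup[i].num == num)
			break;
	/* falling through with i == max selects the catch-all entry */
	return advanced_lookup[i].desc;
}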
597 base = trans->shrd->device_pointers.error_event_table; in iwl_dump_nic_error_log()
598 if (trans->shrd->ucode_type == IWL_UCODE_INIT) { in iwl_dump_nic_error_log()
600 base = priv->init_errlog_ptr; in iwl_dump_nic_error_log()
603 base = priv->inst_errlog_ptr; in iwl_dump_nic_error_log()
610 (trans->shrd->ucode_type == IWL_UCODE_INIT) in iwl_dump_nic_error_log()
620 trans->shrd->status, table.valid); in iwl_dump_nic_error_log()
623 trans_pcie->isr_stats.err_code = table.error_id; in iwl_dump_nic_error_log()
631 IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id, in iwl_dump_nic_error_log()
669 * iwl_irq_handle_error - called for HW or SW error interrupt from card
675 if (cfg(priv)->internal_wimax_coex && in iwl_irq_handle_error()
684 clear_bit(STATUS_READY, &trans->shrd->status); in iwl_irq_handle_error()
685 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); in iwl_irq_handle_error()
686 wake_up(&priv->shrd->wait_command_queue); in iwl_irq_handle_error()
692 priv->hw->wiphy->fw_version); in iwl_irq_handle_error()
699 if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) in iwl_irq_handle_error()
709 * iwl_print_event_log - Dump error event log to syslog
727 base = trans->shrd->device_pointers.log_event_table; in iwl_print_event_log()
728 if (trans->shrd->ucode_type == IWL_UCODE_INIT) { in iwl_print_event_log()
730 base = priv->init_evtlog_ptr; in iwl_print_event_log()
733 base = priv->inst_evtlog_ptr; in iwl_print_event_log()
744 spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags); in iwl_print_event_log()
747 /* Set starting address; reads will auto-increment */ in iwl_print_event_log()
759 pos += scnprintf(*buf + pos, bufsz - pos, in iwl_print_event_log()
771 pos += scnprintf(*buf + pos, bufsz - pos, in iwl_print_event_log()
785 spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags); in iwl_print_event_log()
790 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
804 capacity - (size - next_entry), in iwl_print_last_event_logs()
805 size - next_entry, mode, in iwl_print_last_event_logs()
811 pos = iwl_print_event_log(trans, next_entry - size, in iwl_print_last_event_logs()
818 pos = iwl_print_event_log(trans, next_entry - size, in iwl_print_last_event_logs()
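The index arithmetic is easier to follow with numbers. Suppose capacity = 512, size = 20 and next_entry = 10: the newest 20 events straddle the wrap point, so the code first dumps the (size - next_entry) = 10 oldest of them starting at capacity - (size - next_entry) = 502, i.e. entries [502, 512), and then wraps around to dump entries [0, 10).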
832 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ in iwl_dump_nic_event_log()
841 base = trans->shrd->device_pointers.log_event_table; in iwl_dump_nic_event_log()
842 if (trans->shrd->ucode_type == IWL_UCODE_INIT) { in iwl_dump_nic_event_log()
843 logsize = priv->init_evtlog_size; in iwl_dump_nic_event_log()
845 base = priv->init_evtlog_ptr; in iwl_dump_nic_event_log()
847 logsize = priv->inst_evtlog_size; in iwl_dump_nic_event_log()
849 base = priv->inst_evtlog_ptr; in iwl_dump_nic_event_log()
856 (trans->shrd->ucode_type == IWL_UCODE_INIT) in iwl_dump_nic_event_log()
858 return -EINVAL; in iwl_dump_nic_event_log()
888 if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log) in iwl_dump_nic_event_log()
906 return -ENOMEM; in iwl_dump_nic_event_log()
908 if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) { in iwl_dump_nic_event_log()
916 capacity - next_entry, mode, in iwl_dump_nic_event_log()
945 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; in iwl_irq_tasklet()
948 spin_lock_irqsave(&trans->shrd->lock, flags); in iwl_irq_tasklet()
962 trans_pcie->inta | ~trans_pcie->inta_mask); in iwl_irq_tasklet()
964 inta = trans_pcie->inta; in iwl_irq_tasklet()
967 if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) { in iwl_irq_tasklet()
975 /* the interrupt was saved in the inta variable; now we can reset trans_pcie->inta */ in iwl_irq_tasklet()
976 trans_pcie->inta = 0; in iwl_irq_tasklet()
978 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_irq_tasklet()
987 isr_stats->hw++; in iwl_irq_tasklet()
996 if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) { in iwl_irq_tasklet()
1001 isr_stats->sch++; in iwl_irq_tasklet()
1007 isr_stats->alive++; in iwl_irq_tasklet()
1024 isr_stats->rfkill++; in iwl_irq_tasklet()
1031 if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) { in iwl_irq_tasklet()
1034 &trans->shrd->status); in iwl_irq_tasklet()
1037 &trans->shrd->status); in iwl_irq_tasklet()
1047 isr_stats->ctkill++; in iwl_irq_tasklet()
1055 isr_stats->sw++; in iwl_irq_tasklet()
1060 /* uCode wakes up after power-down sleep */ in iwl_irq_tasklet()
1063 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); in iwl_irq_tasklet()
1066 &trans_pcie->txq[i]); in iwl_irq_tasklet()
1068 isr_stats->wakeup++; in iwl_irq_tasklet()
1074 * Rx "responses" (frame-received notification), and other in iwl_irq_tasklet()
1091 * 1- write interrupt to current index in ICT table. in iwl_irq_tasklet()
1092 * 2- DMA the RX frame. in iwl_irq_tasklet()
1093 * 3- update RX shared data to indicate last write index. in iwl_irq_tasklet()
1094 * 4- send interrupt. in iwl_irq_tasklet()
1100 /* Disable periodic interrupt; we use it as just a one-shot. */ in iwl_irq_tasklet()
1110 * to extend the periodic interrupt; one-shot is enough. in iwl_irq_tasklet()
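In this driver generation the disarm/re-arm pair around iwl_rx_handle() looks roughly like the sketch below (CSR_INT_PERIODIC_REG and its DIS/ENA values are the iwlwifi names; treat the exact placement as a reconstruction):

	/* one-shot: disarm before handling */
	iwl_write8(bus(trans), CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS);
	iwl_rx_handle(trans);
	/* re-arm only if a real Rx interrupt fired, to catch stragglers */
	if (inta & CSR_INT_BIT_FH_RX)
		iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_ENA);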
1116 isr_stats->rx++; in iwl_irq_tasklet()
1123 isr_stats->tx++; in iwl_irq_tasklet()
1126 trans->ucode_write_complete = 1; in iwl_irq_tasklet()
1127 wake_up(&trans->shrd->wait_command_queue); in iwl_irq_tasklet()
1132 isr_stats->unhandled++; in iwl_irq_tasklet()
1135 if (inta & ~(trans_pcie->inta_mask)) { in iwl_irq_tasklet()
1137 inta & ~trans_pcie->inta_mask); in iwl_irq_tasklet()
1140 /* Re-enable all interrupts */ in iwl_irq_tasklet()
1141 /* only Re-enable if disabled by irq */ in iwl_irq_tasklet()
1142 if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status)) in iwl_irq_tasklet()
1144 /* Re-enable RF_KILL if it occurred */ in iwl_irq_tasklet()
1155 /* a device (PCI-E) page is 4096 bytes long */
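The table geometry follows from that page size; the constants are defined along these lines (reconstruction, consistent with the alignment WARN_ON in iwl_alloc_isr_ict() below):

#define ICT_SHIFT	12			/* 4 KB, one device page */
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))	/* 1024 entries */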
1166 if (trans_pcie->ict_tbl) { in iwl_free_isr_ict()
1167 dma_free_coherent(bus(trans)->dev, ICT_SIZE, in iwl_free_isr_ict()
1168 trans_pcie->ict_tbl, in iwl_free_isr_ict()
1169 trans_pcie->ict_tbl_dma); in iwl_free_isr_ict()
1170 trans_pcie->ict_tbl = NULL; in iwl_free_isr_ict()
1171 trans_pcie->ict_tbl_dma = 0; in iwl_free_isr_ict()
1186 trans_pcie->ict_tbl = in iwl_alloc_isr_ict()
1187 dma_alloc_coherent(bus(trans)->dev, ICT_SIZE, in iwl_alloc_isr_ict()
1188 &trans_pcie->ict_tbl_dma, in iwl_alloc_isr_ict()
1190 if (!trans_pcie->ict_tbl) in iwl_alloc_isr_ict()
1191 return -ENOMEM; in iwl_alloc_isr_ict()
1194 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { in iwl_alloc_isr_ict()
1196 return -EINVAL; in iwl_alloc_isr_ict()
1200 (unsigned long long)trans_pcie->ict_tbl_dma); in iwl_alloc_isr_ict()
1202 IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl); in iwl_alloc_isr_ict()
1205 memset(trans_pcie->ict_tbl, 0, ICT_SIZE); in iwl_alloc_isr_ict()
1206 trans_pcie->ict_index = 0; in iwl_alloc_isr_ict()
1209 trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC; in iwl_alloc_isr_ict()
1223 if (!trans_pcie->ict_tbl) in iwl_reset_ict()
1226 spin_lock_irqsave(&trans->shrd->lock, flags); in iwl_reset_ict()
1229 memset(trans_pcie->ict_tbl, 0, ICT_SIZE); in iwl_reset_ict()
1231 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; in iwl_reset_ict()
1239 trans_pcie->use_ict = true; in iwl_reset_ict()
1240 trans_pcie->ict_index = 0; in iwl_reset_ict()
1241 iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask); in iwl_reset_ict()
1243 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_reset_ict()
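The listing skips the step between computing val and re-enabling interrupts: the shifted table base is OR'ed with enable/wrap-check flags and written to the DRAM interrupt table register. A sketch of the elided lines, using this driver's CSR names:

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;	/* "INIT" is the CSR's own spelling */
	iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);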
1256 spin_lock_irqsave(&trans->shrd->lock, flags); in iwl_disable_ict()
1257 trans_pcie->use_ict = false; in iwl_disable_ict()
1258 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_disable_ict()
1277 spin_lock_irqsave(&trans->shrd->lock, flags); in iwl_isr()
1280 * back-to-back ISRs and sporadic interrupts from our NIC. in iwl_isr()
1281 * If we have something to service, the tasklet will re-enable ints. in iwl_isr()
1282 * If we *don't* have something, we'll re-enable before leaving here. */ in iwl_isr()
1305 if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) { in iwl_isr()
1312 trans_pcie->inta |= inta; in iwl_isr()
1313 /* iwl_irq_tasklet() will service interrupts and re-enable them */ in iwl_isr()
1315 tasklet_schedule(&trans_pcie->irq_tasklet); in iwl_isr()
1316 else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && in iwl_isr()
1317 !trans_pcie->inta) in iwl_isr()
1321 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_isr()
1325 /* re-enable interrupts here since we don't have anything to service. */ in iwl_isr()
1326 /* only re-enable if disabled by irq and no tasklet was scheduled. */ in iwl_isr()
1327 if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && in iwl_isr()
1328 !trans_pcie->inta) in iwl_isr()
1331 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_isr()
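Not shown in this listing is the sanity check an ISR of this style performs right after reading CSR_INT; in this driver family it looks roughly like the following (reconstruction):

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* the device is gone (e.g. surprise removal); the
		 * read-back is garbage, so bail out */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}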
1360 if (!trans_pcie->use_ict) in iwl_isr_ict()
1365 spin_lock_irqsave(&trans->shrd->lock, flags); in iwl_isr_ict()
1368 * back-to-back ISRs and sporadic interrupts from our NIC. in iwl_isr_ict()
1369 * If we have something to service, the tasklet will re-enable ints. in iwl_isr_ict()
1370 * If we *don't* have something, we'll re-enable before leaving here. in iwl_isr_ict()
1379 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); in iwl_isr_ict()
1380 trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read); in iwl_isr_ict()
1393 trans_pcie->ict_index, read); in iwl_isr_ict()
1394 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; in iwl_isr_ict()
1395 trans_pcie->ict_index = in iwl_isr_ict()
1396 iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); in iwl_isr_ict()
1398 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); in iwl_isr_ict()
1399 trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, in iwl_isr_ict()
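Each ICT entry is a compressed 16-bit image of the interrupt cause. After draining the non-zero entries, the handler expands the accumulated value back into the 32-bit CSR_INT bit layout, roughly:

	/* low byte stays in place, high byte moves up to bits 24-31 */
	inta = (0xff & val) | ((0xff00 & val) << 16);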
1421 inta &= trans_pcie->inta_mask; in iwl_isr_ict()
1422 trans_pcie->inta |= inta; in iwl_isr_ict()
1424 /* iwl_irq_tasklet() will service interrupts and re-enable them */ in iwl_isr_ict()
1426 tasklet_schedule(&trans_pcie->irq_tasklet); in iwl_isr_ict()
1427 else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && in iwl_isr_ict()
1428 !trans_pcie->inta) { in iwl_isr_ict()
1436 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_isr_ict()
1440 /* re-enable interrupts here since we don't have anything to service. in iwl_isr_ict()
1441 * only Re-enable if disabled by irq. in iwl_isr_ict()
1443 if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) && in iwl_isr_ict()
1444 !trans_pcie->inta) in iwl_isr_ict()
1447 spin_unlock_irqrestore(&trans->shrd->lock, flags); in iwl_isr_ict()