Lines matching full:dev (identifier cross-reference hits, apparently from the mt76 driver's mt76x02_mmio.c; each entry shows the source line number, the matching code, and the enclosing function)

16 struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; in mt76x02_pre_tbtt_tasklet() local
17 struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD]; in mt76x02_pre_tbtt_tasklet()
22 if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL) in mt76x02_pre_tbtt_tasklet()
25 mt76x02_resync_beacon_timer(dev); in mt76x02_pre_tbtt_tasklet()
28 mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff); in mt76x02_pre_tbtt_tasklet()
29 dev->beacon_data_count = 0; in mt76x02_pre_tbtt_tasklet()
31 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), in mt76x02_pre_tbtt_tasklet()
33 mt76x02_update_beacon_iter, dev); in mt76x02_pre_tbtt_tasklet()
35 mt76_wr(dev, MT_BCN_BYPASS_MASK, in mt76x02_pre_tbtt_tasklet()
36 0xff00 | ~(0xff00 >> dev->beacon_data_count)); in mt76x02_pre_tbtt_tasklet()
38 mt76_csa_check(&dev->mt76); in mt76x02_pre_tbtt_tasklet()
40 if (dev->mt76.csa_complete) in mt76x02_pre_tbtt_tasklet()
43 mt76x02_enqueue_buffered_bc(dev, &data, 8); in mt76x02_pre_tbtt_tasklet()
61 mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid, in mt76x02_pre_tbtt_tasklet()
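
The mask written on line 36 is worth unpacking. Below is a tiny stand-alone C sketch (not driver code) that prints 0xff00 | ~(0xff00 >> beacon_data_count) for each count. The cleared low-order bits appear to mark beacon slots holding valid data; note that count == 0 never reaches this expression, since lines 28-29 set the whole bypass mask instead.

/* Hypothetical user-space demo of the MT_BCN_BYPASS_MASK value computed
 * on line 36 above; compile with `cc demo.c` and run. Not kernel code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	for (int count = 1; count <= 8; count++) {
		uint32_t mask = 0xff00u | ~(0xff00u >> count);

		/* low byte: one cleared bit per populated beacon slot */
		printf("beacon_data_count=%d -> 0x%08x\n", count, (unsigned)mask);
	}
	return 0;
}
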
67 static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en) in mt76x02e_pre_tbtt_enable() argument
70 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); in mt76x02e_pre_tbtt_enable()
72 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); in mt76x02e_pre_tbtt_enable()
75 static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en) in mt76x02e_beacon_enable() argument
77 mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); in mt76x02e_beacon_enable()
79 mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); in mt76x02e_beacon_enable()
81 mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); in mt76x02e_beacon_enable()
84 void mt76x02e_init_beacon_config(struct mt76x02_dev *dev) in mt76x02e_init_beacon_config() argument
93 dev->beacon_ops = &beacon_ops; in mt76x02e_init_beacon_config()
96 mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, in mt76x02e_init_beacon_config()
98 mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, in mt76x02e_init_beacon_config()
100 mt76_wr(dev, MT_INT_TIMER_EN, 0); in mt76x02e_init_beacon_config()
102 mt76x02_init_beacon_config(dev); in mt76x02e_init_beacon_config()
107 mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc) in mt76x02_init_tx_queue() argument
112 hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); in mt76x02_init_tx_queue()
116 err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE); in mt76x02_init_tx_queue()
120 dev->mt76.q_tx[qid] = hwq; in mt76x02_init_tx_queue()
122 mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx)); in mt76x02_init_tx_queue()
128 mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, in mt76x02_init_rx_queue() argument
133 err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize, in mt76x02_init_rx_queue()
138 mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx)); in mt76x02_init_rx_queue()
143 static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev) in mt76x02_process_tx_status_fifo() argument
148 while (kfifo_get(&dev->txstatus_fifo, &stat)) in mt76x02_process_tx_status_fifo()
149 mt76x02_send_tx_status(dev, &stat, &update); in mt76x02_process_tx_status_fifo()
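
Lines 148-149 drain the TX-status FIFO until it is empty, reporting each entry. A minimal user-space model of that drain loop; the names here (struct tx_status, fifo_get, the ring layout) are illustrative stand-ins, not the kernel kfifo API.

#include <stdio.h>
#include <stdbool.h>

struct tx_status { int wcid; bool success; };	/* stand-in for the driver's status record */

static struct tx_status fifo[8] = { { 1, true }, { 2, false } };
static unsigned int head, tail = 2;

/* pop one entry, as kfifo_get() does; false means the FIFO is empty */
static bool fifo_get(struct tx_status *out)
{
	if (head == tail)
		return false;
	*out = fifo[head++ % 8];
	return true;
}

int main(void)
{
	struct tx_status stat;

	/* mirrors: while (kfifo_get(&dev->txstatus_fifo, &stat)) ... */
	while (fifo_get(&stat))
		printf("wcid %d: %s\n", stat.wcid, stat.success ? "acked" : "lost");
	return 0;
}
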
154 struct mt76x02_dev *dev; in mt76x02_tx_worker() local
156 dev = container_of(w, struct mt76x02_dev, mt76.tx_worker); in mt76x02_tx_worker()
158 mt76x02_mac_poll_tx_status(dev, false); in mt76x02_tx_worker()
159 mt76x02_process_tx_status_fifo(dev); in mt76x02_tx_worker()
161 mt76_txq_schedule_all(&dev->mphy); in mt76x02_tx_worker()
166 struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, in mt76x02_poll_tx() local
170 mt76x02_mac_poll_tx_status(dev, false); in mt76x02_poll_tx()
173 mt76_queue_tx_cleanup(dev, i, false); in mt76x02_poll_tx()
176 mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL); in mt76x02_poll_tx()
179 mt76_queue_tx_cleanup(dev, i, false); in mt76x02_poll_tx()
181 mt76_worker_schedule(&dev->mt76.tx_worker); in mt76x02_poll_tx()
186 int mt76x02_dma_init(struct mt76x02_dev *dev) in mt76x02_dma_init() argument
196 status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); in mt76x02_dma_init()
200 dev->mt76.tx_worker.fn = mt76x02_tx_worker; in mt76x02_dma_init()
201 tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet, in mt76x02_dma_init()
202 (unsigned long)dev); in mt76x02_dma_init()
204 spin_lock_init(&dev->txstatus_fifo_lock); in mt76x02_dma_init()
205 kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); in mt76x02_dma_init()
207 mt76_dma_attach(&dev->mt76); in mt76x02_dma_init()
209 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); in mt76x02_dma_init()
212 ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i), in mt76x02_dma_init()
218 ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD, in mt76x02_dma_init()
223 ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU, in mt76x02_dma_init()
228 ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, in mt76x02_dma_init()
233 q = &dev->mt76.q_rx[MT_RXQ_MAIN]; in mt76x02_dma_init()
235 ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE, in mt76x02_dma_init()
240 ret = mt76_init_queues(dev); in mt76x02_dma_init()
244 netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, in mt76x02_dma_init()
246 napi_enable(&dev->mt76.tx_napi); in mt76x02_dma_init()
254 struct mt76x02_dev *dev; in mt76x02_rx_poll_complete() local
256 dev = container_of(mdev, struct mt76x02_dev, mt76); in mt76x02_rx_poll_complete()
257 mt76x02_irq_enable(dev, MT_INT_RX_DONE(q)); in mt76x02_rx_poll_complete()
263 struct mt76x02_dev *dev = dev_instance; in mt76x02_irq_handler() local
266 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); in mt76x02_irq_handler()
267 intr &= dev->mt76.mmio.irqmask; in mt76x02_irq_handler()
268 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); in mt76x02_irq_handler()
270 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) in mt76x02_irq_handler()
273 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); in mt76x02_irq_handler()
279 mt76x02_irq_disable(dev, mask); in mt76x02_irq_handler()
282 napi_schedule(&dev->mt76.napi[0]); in mt76x02_irq_handler()
285 napi_schedule(&dev->mt76.napi[1]); in mt76x02_irq_handler()
288 tasklet_schedule(&dev->mt76.pre_tbtt_tasklet); in mt76x02_irq_handler()
292 if (dev->mt76.csa_complete) in mt76x02_irq_handler()
293 mt76_csa_finish(&dev->mt76); in mt76x02_irq_handler()
295 mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]); in mt76x02_irq_handler()
299 mt76x02_mac_poll_tx_status(dev, true); in mt76x02_irq_handler()
302 napi_schedule(&dev->mt76.tx_napi); in mt76x02_irq_handler()
305 tasklet_schedule(&dev->dfs_pd.dfs_tasklet); in mt76x02_irq_handler()
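
Lines 266-268 are the classic MMIO interrupt handshake: read the pending-source register, drop any bits software never enabled, then write the value back to acknowledge exactly what will be handled. A hedged stand-alone model; the write-1-to-clear register is simulated with a plain variable.

#include <stdio.h>
#include <stdint.h>

static uint32_t int_source_csr = 0x0000000b;	/* pretend the hardware latched these */
static const uint32_t irqmask  = 0x00000003;	/* sources the driver enabled */

int main(void)
{
	uint32_t intr = int_source_csr;		/* mt76_rr(dev, MT_INT_SOURCE_CSR) */

	intr &= irqmask;			/* ignore sources we never asked for */
	int_source_csr &= ~intr;		/* model the write-1-to-clear ack */

	printf("handling 0x%08x, still latched 0x%08x\n",
	       (unsigned)intr, (unsigned)int_source_csr);
	return 0;
}
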
311 static void mt76x02_dma_enable(struct mt76x02_dev *dev) in mt76x02_dma_enable() argument
315 mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); in mt76x02_dma_enable()
316 mt76x02_wait_for_wpdma(&dev->mt76, 1000); in mt76x02_dma_enable()
322 mt76_set(dev, MT_WPDMA_GLO_CFG, val); in mt76x02_dma_enable()
323 mt76_clear(dev, MT_WPDMA_GLO_CFG, in mt76x02_dma_enable()
327 void mt76x02_dma_disable(struct mt76x02_dev *dev) in mt76x02_dma_disable() argument
329 u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); in mt76x02_dma_disable()
335 mt76_wr(dev, MT_WPDMA_GLO_CFG, val); in mt76x02_dma_disable()
339 void mt76x02_mac_start(struct mt76x02_dev *dev) in mt76x02_mac_start() argument
341 mt76x02_mac_reset_counters(dev); in mt76x02_mac_start()
342 mt76x02_dma_enable(dev); in mt76x02_mac_start()
343 mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); in mt76x02_mac_start()
344 mt76_wr(dev, MT_MAC_SYS_CTRL, in mt76x02_mac_start()
347 mt76x02_irq_enable(dev, in mt76x02_mac_start()
353 static bool mt76x02_tx_hang(struct mt76x02_dev *dev) in mt76x02_tx_hang() argument
360 q = dev->mt76.q_tx[i]; in mt76x02_tx_hang()
365 prev_dma_idx = dev->mt76.tx_dma_idx[i]; in mt76x02_tx_hang()
367 dev->mt76.tx_dma_idx[i] = dma_idx; in mt76x02_tx_hang()
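
The check on lines 360-367 declares a queue suspect when its DMA index has not moved since the previous watchdog pass even though descriptors are still queued. A small illustrative model (the names are made up, not the mt76 API):

#include <stdio.h>
#include <stdbool.h>

struct queue { int queued; int dma_idx; };

static int prev_dma_idx;

static bool tx_hang(const struct queue *q)
{
	bool stalled = q->queued && q->dma_idx == prev_dma_idx;

	prev_dma_idx = q->dma_idx;	/* remember the index for the next pass */
	return stalled;
}

int main(void)
{
	struct queue q = { .queued = 4, .dma_idx = 7 };

	printf("pass 1: %s\n", tx_hang(&q) ? "stalled" : "ok");	/* ok: first sample */
	printf("pass 2: %s\n", tx_hang(&q) ? "stalled" : "ok");	/* stalled: no progress */
	return 0;
}
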
380 struct mt76x02_dev *dev = hw->priv; in mt76x02_key_sync() local
391 mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key); in mt76x02_key_sync()
394 static void mt76x02_reset_state(struct mt76x02_dev *dev) in mt76x02_reset_state() argument
398 lockdep_assert_held(&dev->mt76.mutex); in mt76x02_reset_state()
400 clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); in mt76x02_reset_state()
403 ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL); in mt76x02_reset_state()
413 wcid = rcu_dereference_protected(dev->mt76.wcid[i], in mt76x02_reset_state()
414 lockdep_is_held(&dev->mt76.mutex)); in mt76x02_reset_state()
418 rcu_assign_pointer(dev->mt76.wcid[i], NULL); in mt76x02_reset_state()
426 __mt76_sta_remove(&dev->mt76, vif, sta); in mt76x02_reset_state()
430 dev->mphy.vif_mask = 0; in mt76x02_reset_state()
431 dev->mt76.beacon_mask = 0; in mt76x02_reset_state()
434 static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) in mt76x02_watchdog_reset() argument
436 u32 mask = dev->mt76.mmio.irqmask; in mt76x02_watchdog_reset()
437 bool restart = dev->mt76.mcu_ops->mcu_restart; in mt76x02_watchdog_reset()
440 ieee80211_stop_queues(dev->mt76.hw); in mt76x02_watchdog_reset()
441 set_bit(MT76_RESET, &dev->mphy.state); in mt76x02_watchdog_reset()
443 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); in mt76x02_watchdog_reset()
444 mt76_worker_disable(&dev->mt76.tx_worker); in mt76x02_watchdog_reset()
445 napi_disable(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
447 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
448 napi_disable(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
451 mutex_lock(&dev->mt76.mutex); in mt76x02_watchdog_reset()
453 dev->mcu_timeout = 0; in mt76x02_watchdog_reset()
455 mt76x02_reset_state(dev); in mt76x02_watchdog_reset()
457 if (dev->mt76.beacon_mask) in mt76x02_watchdog_reset()
458 mt76_clear(dev, MT_BEACON_TIME_CFG, in mt76x02_watchdog_reset()
462 mt76x02_irq_disable(dev, mask); in mt76x02_watchdog_reset()
465 mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); in mt76x02_watchdog_reset()
466 mt76_wr(dev, MT_MAC_SYS_CTRL, 0); in mt76x02_watchdog_reset()
467 mt76_clear(dev, MT_WPDMA_GLO_CFG, in mt76x02_watchdog_reset()
470 mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff); in mt76x02_watchdog_reset()
473 mt76_set(dev, 0x734, 0x3); in mt76x02_watchdog_reset()
476 mt76_mcu_restart(dev); in mt76x02_watchdog_reset()
479 mt76_queue_tx_cleanup(dev, i, true); in mt76x02_watchdog_reset()
481 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
482 mt76_queue_rx_reset(dev, i); in mt76x02_watchdog_reset()
485 mt76x02_mac_start(dev); in mt76x02_watchdog_reset()
487 if (dev->ed_monitor) in mt76x02_watchdog_reset()
488 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); in mt76x02_watchdog_reset()
490 if (dev->mt76.beacon_mask && !restart) in mt76x02_watchdog_reset()
491 mt76_set(dev, MT_BEACON_TIME_CFG, in mt76x02_watchdog_reset()
495 mt76x02_irq_enable(dev, mask); in mt76x02_watchdog_reset()
497 mutex_unlock(&dev->mt76.mutex); in mt76x02_watchdog_reset()
499 clear_bit(MT76_RESET, &dev->mphy.state); in mt76x02_watchdog_reset()
501 mt76_worker_enable(&dev->mt76.tx_worker); in mt76x02_watchdog_reset()
502 napi_enable(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
503 napi_schedule(&dev->mt76.tx_napi); in mt76x02_watchdog_reset()
505 tasklet_enable(&dev->mt76.pre_tbtt_tasklet); in mt76x02_watchdog_reset()
507 mt76_for_each_q_rx(&dev->mt76, i) { in mt76x02_watchdog_reset()
508 napi_enable(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
509 napi_schedule(&dev->mt76.napi[i]); in mt76x02_watchdog_reset()
513 set_bit(MT76_RESTART, &dev->mphy.state); in mt76x02_watchdog_reset()
514 mt76x02_mcu_function_select(dev, Q_SELECT, 1); in mt76x02_watchdog_reset()
515 ieee80211_restart_hw(dev->mt76.hw); in mt76x02_watchdog_reset()
517 ieee80211_wake_queues(dev->mt76.hw); in mt76x02_watchdog_reset()
518 mt76_txq_schedule_all(&dev->mphy); in mt76x02_watchdog_reset()
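
What matters in lines 436-518 is the ordering: every software producer is quiesced before the MAC and DMA are reset, and the pieces come back up roughly in reverse. A runnable outline that condenses the sequence above into stubbed steps; the step strings only paraphrase the listing.

#include <stdio.h>

static void step(const char *s) { printf("- %s\n", s); }

int main(void)
{
	/* 1. quiesce: nothing may feed the DMA while it is being reset */
	step("stop mac80211 queues, set MT76_RESET");
	step("disable pre-TBTT tasklet, tx worker, tx/rx NAPI");
	/* 2. reset, holding dev->mt76.mutex */
	step("mask IRQs, stop MAC, clear WPDMA tx/rx enable, ack all IRQs");
	step("restart the MCU if a restart hook exists");
	step("clean up tx queues, reset rx queues, restart the MAC");
	/* 3. bring everything back in roughly reverse order */
	step("restore IRQ mask, drop mutex, clear MT76_RESET");
	step("re-enable worker, NAPI, tasklet; wake queues; reschedule tx");
	return 0;
}
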
525 struct mt76x02_dev *dev = hw->priv; in mt76x02_reconfig_complete() local
530 clear_bit(MT76_RESTART, &dev->mphy.state); in mt76x02_reconfig_complete()
534 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) in mt76x02_check_tx_hang() argument
536 if (test_bit(MT76_RESTART, &dev->mphy.state)) in mt76x02_check_tx_hang()
539 if (mt76x02_tx_hang(dev)) { in mt76x02_check_tx_hang()
540 if (++dev->tx_hang_check >= MT_TX_HANG_TH) in mt76x02_check_tx_hang()
543 dev->tx_hang_check = 0; in mt76x02_check_tx_hang()
546 if (dev->mcu_timeout) in mt76x02_check_tx_hang()
552 mt76x02_watchdog_reset(dev); in mt76x02_check_tx_hang()
554 dev->tx_hang_reset++; in mt76x02_check_tx_hang()
555 dev->tx_hang_check = 0; in mt76x02_check_tx_hang()
556 memset(dev->mt76.tx_dma_idx, 0xff, in mt76x02_check_tx_hang()
557 sizeof(dev->mt76.tx_dma_idx)); in mt76x02_check_tx_hang()
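
Lines 539-543 debounce the detector: a watchdog reset fires only after MT_TX_HANG_TH consecutive positive hang checks, and any clean pass clears the counter (lines 554-557 then reset the bookkeeping after a reset). A sketch of that logic; the threshold value of 10 is an assumption here.

#include <stdio.h>
#include <stdbool.h>

#define MT_TX_HANG_TH 10	/* assumed value of the driver's threshold */

static int tx_hang_check;

static bool should_reset(bool hang_now)
{
	if (!hang_now) {
		tx_hang_check = 0;	/* one healthy pass clears the count */
		return false;
	}
	return ++tx_hang_check >= MT_TX_HANG_TH;
}

int main(void)
{
	for (int pass = 1; pass <= 12; pass++)
		if (should_reset(true))
			printf("pass %d: trigger watchdog reset\n", pass);
	return 0;
}
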
562 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, in mt76x02_wdt_work() local
565 mt76x02_check_tx_hang(dev); in mt76x02_wdt_work()
567 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, in mt76x02_wdt_work()