xref: /linux/drivers/net/wireless/mediatek/mt76/mt792x_dma.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /* Copyright (C) 2023 MediaTek Inc. */
3 
4 #include <linux/module.h>
5 #include <linux/firmware.h>
6 
7 #include "mt792x.h"
8 #include "dma.h"
9 #include "trace.h"
10 
/* Hard-IRQ handler shared by all MT792x PCIe devices.
 *
 * Masks every host interrupt source and defers the real work to
 * mt792x_irq_tasklet(), which re-enables the serviced sources.
 */
irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
	struct mt792x_dev *dev = dev_instance;

	/* device already unplugged/removed: the line cannot be ours */
	if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
		return IRQ_NONE;
	/* mask all sources; the tasklet unmasks what it has handled */
	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

	/* too early in bring-up to process interrupts */
	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);
27 
28 void mt792x_irq_tasklet(unsigned long data)
29 {
30 	struct mt792x_dev *dev = (struct mt792x_dev *)data;
31 	const struct mt792x_irq_map *irq_map = dev->irq_map;
32 	u32 intr, mask = 0;
33 
34 	mt76_wr(dev, irq_map->host_irq_enable, 0);
35 
36 	intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
37 	intr &= dev->mt76.mmio.irqmask;
38 	mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);
39 
40 	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
41 
42 	mask |= intr & (irq_map->rx.data_complete_mask |
43 			irq_map->rx.wm_complete_mask |
44 			irq_map->rx.wm2_complete_mask);
45 	if (intr & dev->irq_map->tx.mcu_complete_mask)
46 		mask |= dev->irq_map->tx.mcu_complete_mask;
47 
48 	if (intr & MT_INT_MCU_CMD) {
49 		u32 intr_sw;
50 
51 		intr_sw = mt76_rr(dev, MT_MCU_CMD);
52 		/* ack MCU2HOST_SW_INT_STA */
53 		mt76_wr(dev, MT_MCU_CMD, intr_sw);
54 		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
55 			mask |= irq_map->rx.data_complete_mask;
56 			intr |= irq_map->rx.data_complete_mask;
57 		}
58 	}
59 
60 	mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);
61 
62 	if (intr & dev->irq_map->tx.all_complete_mask)
63 		napi_schedule(&dev->mt76.tx_napi);
64 
65 	if (intr & irq_map->rx.wm_complete_mask)
66 		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
67 
68 	if (intr & irq_map->rx.wm2_complete_mask)
69 		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
70 
71 	if (intr & irq_map->rx.data_complete_mask)
72 		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
73 }
74 EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);
75 
76 void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
77 {
78 	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
79 	const struct mt792x_irq_map *irq_map = dev->irq_map;
80 
81 	if (q == MT_RXQ_MAIN)
82 		mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
83 	else if (q == MT_RXQ_MCU_WA)
84 		mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
85 	else
86 		mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
87 }
88 EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);
89 
90 #define PREFETCH(base, depth)	((base) << 16 | (depth))
91 static void mt792x_dma_prefetch(struct mt792x_dev *dev)
92 {
93 	if (is_mt7925(&dev->mt76)) {
94 		/* rx ring */
95 		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4));
96 		mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4));
97 		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4));
98 		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4));
99 		/* tx ring */
100 		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x10));
101 		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0200, 0x10));
102 		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0300, 0x10));
103 		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10));
104 		mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4));
105 		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4));
106 	} else if (is_mt7902(&dev->mt76)) {
107 		/* rx ring */
108 		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4));
109 		mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4));
110 		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4));
111 		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4));
112 		/* tx ring */
113 		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x4));
114 		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0140, 0x4));
115 		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0180, 0x4));
116 		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x01c0, 0x4));
117 		mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x0200, 0x4));
118 		mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x0240, 0x4));
119 		mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x0280, 0x4));
120 		mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x02c0, 0x4));
121 		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0300, 0x4));
122 	} else {
123 		/* rx ring */
124 		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
125 		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
126 		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
127 		mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
128 		mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
129 		/* tx ring */
130 		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
131 		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
132 		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
133 		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
134 		mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
135 		mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
136 		mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
137 		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
138 		mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
139 	}
140 }
141 
/* Program WFDMA0 and turn TX/RX DMA on, then enable the interrupt
 * sources the driver services. Always returns 0; the int return is
 * kept for symmetry with the other dma helpers.
 */
int mt792x_dma_enable(struct mt792x_dev *dev)
{
	/* configure prefetch settings */
	mt792x_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7925(&dev->mt76))
		mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	/* global WFDMA configuration first, DMA enable bits separately
	 * afterwards so the engine starts with the config in place
	 */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
		 MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
		 MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (is_mt7925(&dev->mt76)) {
		/* mt7925-specific extra setup; BIT(28) of GLO_CFG_EXT1 and
		 * the RX/TX_PRI values are undocumented here — presumably
		 * interrupt priority tuning, confirm against vendor docs
		 */
		mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
		mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
		mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
	}
	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       dev->irq_map->rx.data_complete_mask |
			       dev->irq_map->rx.wm2_complete_mask |
			       dev->irq_map->rx.wm_complete_mask |
			       MT_INT_MCU_CMD);
	/* let the MCU wake the rx data path via MCU2HOST sw interrupt */
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);
188 
/* Stop DMA, reset every hw queue and restart the engine.
 *
 * @force: passed through to mt792x_dma_disable(); when true the WFDMA0
 *	   reset logic is pulsed as well.
 *
 * Returns 0 on success or a negative errno from disable/enable.
 */
static int
mt792x_dma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	err = mt792x_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);

	/* flush pending tx status entries before DMA comes back */
	mt76_tx_status_check(&dev->mt76, true);

	return mt792x_dma_enable(dev);
}
212 
/* Clean up all queued tx/rx buffers, optionally reset the WiFi
 * subsystem, then perform a full DMA reset and re-arm the rx queues.
 *
 * @force: also run mt792x_wfsys_reset() before the DMA reset.
 *
 * Returns 0 on success or a negative errno.
 */
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt792x_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt792x_dma_reset(dev, force);
	if (err)
		return err;

	/* refill rx rings now that DMA is running again */
	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);
242 
/* Reinitialize WPDMA if the firmware flagged it as needed (typically
 * after a low-power wakeup). No-op when no reinit is required.
 *
 * Returns 0 on success or a negative errno from mt792x_wpdma_reset().
 */
int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt792x_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt792x_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		/* account this wakeup in the runtime-pm statistics */
		pm->stats.lp_wake++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);
268 
/* Stop the WFDMA0 engine and wait for in-flight DMA to drain.
 *
 * @force: additionally pulse the DMASHDL/logic reset bits.
 *
 * Returns 0 on success, -ETIMEDOUT if TX/RX DMA stays busy for 100ms.
 */
int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* wait until the engine reports idle before touching anything else */
	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
		return -ETIMEDOUT;

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	if (force) {
		/* reset: clear-then-set pulses the reset lines */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);
303 
/* Teardown path: stop WFDMA0, pulse its reset lines and release all
 * mt76 DMA resources. Unlike mt792x_dma_disable(), a drain timeout is
 * ignored here — we are tearing down regardless.
 */
void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* best-effort wait for in-flight DMA; result intentionally unused */
	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

	/* reset: clear-then-set pulses the reset lines */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	/* free queues and DMA descriptors */
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);
331 
332 int mt792x_poll_tx(struct napi_struct *napi, int budget)
333 {
334 	struct mt792x_dev *dev;
335 
336 	dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);
337 
338 	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
339 		napi_complete(napi);
340 		queue_work(dev->mt76.wq, &dev->pm.wake_work);
341 		return 0;
342 	}
343 
344 	mt76_connac_tx_cleanup(&dev->mt76);
345 	if (napi_complete(napi))
346 		mt76_connac_irq_enable(&dev->mt76,
347 				       dev->irq_map->tx.all_complete_mask);
348 	mt76_connac_pm_unref(&dev->mphy, &dev->pm);
349 
350 	return 0;
351 }
352 EXPORT_SYMBOL_GPL(mt792x_poll_tx);
353 
354 int mt792x_poll_rx(struct napi_struct *napi, int budget)
355 {
356 	struct mt792x_dev *dev;
357 	int done;
358 
359 	dev = mt76_priv(napi->dev);
360 
361 	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
362 		napi_complete(napi);
363 		queue_work(dev->mt76.wq, &dev->pm.wake_work);
364 		return 0;
365 	}
366 	done = mt76_dma_rx_poll(napi, budget);
367 	mt76_connac_pm_unref(&dev->mphy, &dev->pm);
368 
369 	return done;
370 }
371 EXPORT_SYMBOL_GPL(mt792x_poll_rx);
372 
373 int mt792x_wfsys_reset(struct mt792x_dev *dev)
374 {
375 	u32 addr = is_connac2(&dev->mt76) ? 0x18000140 : 0x7c000140;
376 
377 	mt76_clear(dev, addr, WFSYS_SW_RST_B);
378 	msleep(50);
379 	mt76_set(dev, addr, WFSYS_SW_RST_B);
380 
381 	if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
382 			      WFSYS_SW_INIT_DONE, 500))
383 		return -ETIMEDOUT;
384 
385 	return 0;
386 }
387 EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);
388 
389