// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"

int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		idx -= MT_TXQ_ID(0);

		if (phy->mt76->band_idx == MT_BAND2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}

static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->q_wfdma_mask |= (1 << (q));	\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	/* for mt7990, RX ring 1 is for SDO instead */
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	if (mt7996_has_wa(dev))
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
			   MT7996_RXQ_MCU_WA_MAIN);

	switch (mt76_chip(&dev->mt76)) {
	case MT7992_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		break;
	case MT7990_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
			   MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
		if (dev->hif2)
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
				   MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
		break;
	case MT7996_DEVICE_ID:
	default:
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		break;
	}

	if (dev->has_rro) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
			   MT7996_RXQ_MSDU_PG_BAND0);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
			   MT7996_RXQ_TXFREE0);
		/* band1 */
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
			   MT7996_RXQ_MSDU_PG_BAND1);
		/* band2 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
			   MT7996_RXQ_RRO_BAND2);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
			   MT7996_RXQ_MSDU_PG_BAND2);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
			   MT7996_RXQ_TXFREE2);

		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
			   MT7996_RXQ_RRO_IND);
	}

	/* data tx queue */
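	/* ring 0 always carries band0 data; mt7996 additionally maps
	 * rings 1/2 to band1/band2, while the other variants only use ring 1
	 */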
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
		TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
	} else {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	if (mt7996_has_wa(dev))
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
			    MT7996_TXQ_MCU_WA);
}

static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}

static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue, val;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	/* Tx Command Rings */
	val = is_mt7996(&dev->mt76) ? 2 : 4;
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
	if (mt7996_has_wa(dev))
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));

	/* Tx Data Rings */
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	if (!is_mt7996(&dev->mt76) || dev->hif2)
		mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	if (is_mt7996(&dev->mt76))
		mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));

	/* Rx Event Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));

	/* Rx TxFreeDone From WA Rings */
	if (mt7996_has_wa(dev)) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
	}

	/* Rx TxFreeDone From MAC Rings */
	val = is_mt7996(&dev->mt76) ? 4 : 8;
	if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
	if (is_mt7990(&dev->mt76) && dev->hif2)
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
	else if (is_mt7996(&dev->mt76) && dev->has_rro)
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));

	/* Rx Data Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

	/* Rx RRO Rings */
	if (dev->has_rro) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
		queue = is_mt7996(&dev->mt76) ?
			MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
		if (is_mt7996(&dev->mt76)) {
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
				PREFETCH(val));
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
				PREFETCH(val));
		}
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1))
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	if (!mt7996_has_wa(dev))
		irq_mask &=
			~(MT_INT_RX(MT_RXQ_MAIN_WA) |
			  MT_INT_RX(MT_RXQ_BAND1_WA));
	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND |
			 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* fix hardware limitation, pcie1's rx ring3 is not available
		 * so, redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    dev->has_rro)
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 MT_WFDMA0_RX_INT_SEL_RING6);
		else
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
	}

	mt7996_dma_start(dev, reset, true);
}

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
		   MT_INT_TX_DONE_BAND2;
	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
	mt7996_irq_enable(dev, irq_mask);

	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	if (mt7996_has_wa(dev)) {
		ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
					  MT_MCUQ_ID(MT_MCUQ_WA),
					  MT7996_TX_MCU_RING_SIZE,
					  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
		if (ret)
			return ret;
	}

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA, or SDO event for mt7990 */
	ret = mt76_queue_alloc(dev,
			       &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) && !dev->has_rro) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	if (mt7996_has_wa(dev)) {
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       MT_RXQ_ID(MT_RXQ_MAIN_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
		if (ret)
			return ret;
	} else {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;
	}

	if (!mt7996_has_wa(dev) && dev->hif2) {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2:
		 * use pcie0's rx ring3, but redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		if (mt7996_has_wa(dev)) {
			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
					       MT_RXQ_ID(MT_RXQ_BAND1_WA),
					       MT7996_RX_MCU_RING_SIZE,
					       MT_RX_BUF_SIZE,
					       rx_base);
			if (ret)
				return ret;
		}
	}

	if (mtk_wed_device_active(wed) &&
	    mtk_wed_get_rx_capa(wed) &&
	    dev->has_rro) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		/* tx free notify event from WA for band0 */
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
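		/* dev->mphy covers band0; phy2/phy3 only exist on
		 * multi-band configurations, hence the NULL checks
		 */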
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
				continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}