// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2023 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"

enum pcie_rxbd_mode {
	PCIE_RXBD_NORM = 0,
	PCIE_RXBD_SEP,
	PCIE_RXBD_EXT,
};

#define PL0_TMR_SCALE_ASIC	1
#define PL0_TMR_ANA_172US	0x800
#define PL0_TMR_MAC_1MS		0x27100
#define PL0_TMR_AUX_1MS		0x1E848

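/* Workaround helper: assert the HCI wake control bit while the WLAN block is
 * powered up and release it again on power down.
 */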
static void _patch_pcie_power_wake_be(struct rtw89_dev *rtwdev, bool power_up)
{
	if (power_up)
		rtw89_write32_set(rtwdev, R_BE_HCI_OPT_CTRL, BIT_WAKE_CTRL_V1);
	else
		rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, BIT_WAKE_CTRL_V1);
}

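/* Program the PCIe I/O recovery watchdog timers (analog, MAC and AUX groups)
 * when recovery is enabled; otherwise disable all of the related watchdogs.
 */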
static void rtw89_pci_set_io_rcy_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 scale = PL0_TMR_SCALE_ASIC;
	u32 val32;

	if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
		val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
			PL0_TMR_ANA_172US : info->io_rcy_tmr;
		val32 /= scale;

		rtw89_write32(rtwdev, R_BE_AON_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_MDIO_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_LA_MODE_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_AR_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_AW_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_W_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_B_TMR, val32);
		rtw89_write32(rtwdev, R_BE_WDT_R_TMR, val32);

		val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
			PL0_TMR_MAC_1MS : info->io_rcy_tmr;
		val32 /= scale;
		rtw89_write32(rtwdev, R_BE_WLAN_WDT_TMR, val32);
		rtw89_write32(rtwdev, R_BE_AXIDMA_WDT_TMR, val32);

		val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
			PL0_TMR_AUX_1MS : info->io_rcy_tmr;
		val32 /= scale;
		rtw89_write32(rtwdev, R_BE_LOCAL_WDT_TMR, val32);
	} else {
		rtw89_write32_clr(rtwdev, R_BE_WLAN_WDT, B_BE_WLAN_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_AXIDMA_WDT, B_BE_AXIDMA_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_AON_WDT, B_BE_AON_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_LOCAL_WDT, B_BE_LOCAL_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_MDIO_WDT, B_BE_MDIO_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_LA_MODE_WDT, B_BE_LA_MODE_WDT_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_AR, B_BE_WDT_AR_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_AW, B_BE_WDT_AW_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_W, B_BE_WDT_W_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_B, B_BE_WDT_B_ENABLE);
		rtw89_write32_clr(rtwdev, R_BE_WDT_R, B_BE_WDT_R_ENABLE);
	}
}

static void rtw89_pci_ctrl_wpdma_pcie_be(struct rtw89_dev *rtwdev, bool en)
{
	if (en)
		rtw89_write32_clr(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_STOP_WPDMA);
	else
		rtw89_write32_set(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_STOP_WPDMA);
}

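/* Independently enable/disable TX DMA, RX DMA and the AXI master. A value of
 * MAC_AX_PCIE_IGNORE leaves the corresponding control bit untouched.
 */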
static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
					  enum mac_ax_pcie_func_ctrl tx_en,
					  enum mac_ax_pcie_func_ctrl rx_en,
					  enum mac_ax_pcie_func_ctrl io_en)
{
	u32 val;

	val = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);

	if (tx_en == MAC_AX_PCIE_ENABLE)
		val |= B_BE_TXDMA_EN;
	else if (tx_en == MAC_AX_PCIE_DISABLE)
		val &= ~B_BE_TXDMA_EN;

	if (rx_en == MAC_AX_PCIE_ENABLE)
		val |= B_BE_RXDMA_EN;
	else if (rx_en == MAC_AX_PCIE_DISABLE)
		val &= ~B_BE_RXDMA_EN;

	if (io_en == MAC_AX_PCIE_ENABLE)
		val &= ~B_BE_STOP_AXI_MST;
	else if (io_en == MAC_AX_PCIE_DISABLE)
		val |= B_BE_STOP_AXI_MST;

	rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
}

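/* Clear the read/write pointers of all TX/RX buffer-descriptor rings and set
 * the host RXQ/RPQ RXBD indexes back to the end of their rings.
 */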
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 val;

	val = B_BE_CLR_CH0_IDX | B_BE_CLR_CH1_IDX | B_BE_CLR_CH2_IDX |
	      B_BE_CLR_CH3_IDX | B_BE_CLR_CH4_IDX | B_BE_CLR_CH5_IDX |
	      B_BE_CLR_CH6_IDX | B_BE_CLR_CH7_IDX | B_BE_CLR_CH8_IDX |
	      B_BE_CLR_CH9_IDX | B_BE_CLR_CH10_IDX | B_BE_CLR_CH11_IDX |
	      B_BE_CLR_CH12_IDX | B_BE_CLR_CH13_IDX | B_BE_CLR_CH14_IDX;
	rtw89_write32(rtwdev, R_BE_TXBD_RWPTR_CLR1, val);

	rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
		      B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX);

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
	rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
	rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
}

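/* Poll (up to 1 ms) until the TX DMA channels report idle. */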
static int rtw89_pci_poll_txdma_ch_idle_be(struct rtw89_dev *rtwdev)
{
	u32 val;

	return read_poll_timeout(rtw89_read32, val, (val & DMA_BUSY1_CHECK_BE) == 0,
				 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
}

static int rtw89_pci_poll_rxdma_ch_idle_be(struct rtw89_dev *rtwdev)
{
	u32 check;
	u32 val;

	check = B_BE_RXQ0_BUSY_V1 | B_BE_RPQ0_BUSY_V1;

	return read_poll_timeout(rtw89_read32, val, (val & check) == 0,
				 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
}

static int rtw89_pci_poll_dma_all_idle_be(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_pci_poll_txdma_ch_idle_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "txdma ch busy\n");
		return ret;
	}

	ret = rtw89_pci_poll_rxdma_ch_idle_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "rxdma ch busy\n");
		return ret;
	}

	return 0;
}

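/* Program RXBD mode, DMA burst sizes, the outstanding tag limit and the WD
 * DMA idle/active service intervals from the per-chip PCI info.
 */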
static void rtw89_pci_mode_op_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 val32_init1, val32_rxapp, val32_exp;

	val32_init1 = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
	val32_rxapp = rtw89_read32(rtwdev, R_BE_RX_APPEND_MODE);
	val32_exp = rtw89_read32(rtwdev, R_BE_HAXI_EXP_CTRL_V1);

	if (info->rxbd_mode == MAC_AX_RXBD_PKT) {
		val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_NORM,
					       B_BE_RXQ_RXBD_MODE_MASK);
	} else if (info->rxbd_mode == MAC_AX_RXBD_SEP) {
		val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_SEP,
					       B_BE_RXQ_RXBD_MODE_MASK);
		val32_rxapp = u32_replace_bits(val32_rxapp, 0,
					       B_BE_APPEND_LEN_MASK);
	}

	val32_init1 = u32_replace_bits(val32_init1, info->tx_burst,
				       B_BE_MAX_TXDMA_MASK);
	val32_init1 = u32_replace_bits(val32_init1, info->rx_burst,
				       B_BE_MAX_RXDMA_MASK);
	val32_exp = u32_replace_bits(val32_exp, info->multi_tag_num,
				     B_BE_MAX_TAG_NUM_MASK);
	val32_init1 = u32_replace_bits(val32_init1, info->wd_dma_idle_intvl,
				       B_BE_CFG_WD_PERIOD_IDLE_MASK);
	val32_init1 = u32_replace_bits(val32_init1, info->wd_dma_act_intvl,
				       B_BE_CFG_WD_PERIOD_ACTIVE_MASK);

	rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val32_init1);
	rtw89_write32(rtwdev, R_BE_RX_APPEND_MODE, val32_rxapp);
	rtw89_write32(rtwdev, R_BE_HAXI_EXP_CTRL_V1, val32_exp);
}

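/* Trigger a buffer-descriptor RAM reset and wait (up to 500 ms) for hardware
 * to clear the trigger bit.
 */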
static int rtw89_pci_rst_bdram_be(struct rtw89_dev *rtwdev)
{
	u32 val;

	rtw89_write32_set(rtwdev, R_BE_HAXI_INIT_CFG1, B_BE_SET_BDRAM_BOUND);

	return read_poll_timeout(rtw89_read32, val, !(val & B_BE_SET_BDRAM_BOUND),
				 50, 500000, false, rtwdev, R_BE_HAXI_INIT_CFG1);
}

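/* Zero the PCIe PERST debounce period and set the debounce select bit. */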
static void rtw89_pci_debounce_be(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_BE_SYS_PAGE_CLK_GATED);
	val32 = u32_replace_bits(val32, 0, B_BE_PCIE_PRST_DEBUNC_PERIOD_MASK);
	val32 |= B_BE_SYM_PRST_DEBUNC_SEL;
	rtw89_write32(rtwdev, R_BE_SYS_PAGE_CLK_GATED, val32);
}

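/* PCIe LDO settings for low-power states; note that the L1.2 and L2 HCI LDO
 * control disable bits are cleared here.
 */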
static void rtw89_pci_ldo_low_pwr_be(struct rtw89_dev *rtwdev)
{
	rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_PSUS_OFF_CAPC_EN);
	rtw89_write32_set(rtwdev, R_BE_SYS_PAGE_CLK_GATED,
			  B_BE_SOP_OFFPOOBS_PC | B_BE_CPHY_AUXCLK_OP |
			  B_BE_CPHY_POWER_READY_CHK);
	rtw89_write32_clr(rtwdev, R_BE_SYS_SDIO_CTRL, B_BE_PCIE_FORCE_IBX_EN |
						      B_BE_PCIE_DIS_L2_RTK_PERST |
						      B_BE_PCIE_DIS_L2__CTRL_LDO_HCI);
	rtw89_write32_clr(rtwdev, R_BE_L1_2_CTRL_HCILDO, B_BE_PCIE_DIS_L1_2_CTRL_HCILDO);
}

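/* General PCIe aux clock / L1-exit settings; the autoload and aux clock
 * select writes are skipped on RTL8922A CAV silicon.
 */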
static void rtw89_pci_pcie_setting_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;

	rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_EN_AUX_CLK);
	rtw89_write32_clr(rtwdev, R_BE_PCIE_PS_CTRL, B_BE_CMAC_EXIT_L1_EN);

	if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
		return;

	rtw89_write32_set(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_R_SYM_AUTOLOAD_WITH_PMC_SEL);
	rtw89_write32_set(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_SYM_AUX_CLK_SEL);
}

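/* SER (system error recovery) related PCIe setup: clear stale PL1 debug info,
 * enable the SER timeout indication and PL1 SER, and set the PL1 mask bits.
 */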
static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
{
	u32 val32;

	rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
	rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
	rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);

	val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
	val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
		 B_BE_SER_LTSSM_IMR | B_BE_SER_PM_CLK_MASK | B_BE_SER_PCLKREQ_ACK_MASK;
	rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
}

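/* Start/stop the TX DMA channels: @all_en controls the data channels 0-11,
 * @h2c_en controls channel 12 (H2C firmware commands); channels 13 and 14 are
 * always kept stopped here.
 */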
static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
				       bool h2c_en)
{
	u32 mask_all;
	u32 val;

	mask_all = B_BE_STOP_CH0 | B_BE_STOP_CH1 | B_BE_STOP_CH2 |
		   B_BE_STOP_CH3 | B_BE_STOP_CH4 | B_BE_STOP_CH5 |
		   B_BE_STOP_CH6 | B_BE_STOP_CH7 | B_BE_STOP_CH8 |
		   B_BE_STOP_CH9 | B_BE_STOP_CH10 | B_BE_STOP_CH11;

	val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
	val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;

	if (all_en)
		val &= ~mask_all;
	else
		val |= mask_all;

	if (h2c_en)
		val &= ~B_BE_STOP_CH12;
	else
		val |= B_BE_STOP_CH12;

	rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
}

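/* PCIe setup run before MAC init: configure I/O recovery, stop all DMA, wait
 * for it to drain, reset rings and BD RAM, apply the debounce/LDO/PCIe/SER
 * settings, then re-enable DMA with only the H2C TX channel open.
 */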
static int rtw89_pci_ops_mac_pre_init_be(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_pci_set_io_rcy_be(rtwdev);
	_patch_pcie_power_wake_be(rtwdev, true);
	rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, false);
	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_DISABLE,
				      MAC_AX_PCIE_DISABLE, MAC_AX_PCIE_DISABLE);
	rtw89_pci_clr_idx_all_be(rtwdev);

	ret = rtw89_pci_poll_dma_all_idle_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
		return ret;
	}

	rtw89_pci_mode_op_be(rtwdev);
	rtw89_pci_ops_reset(rtwdev);

	ret = rtw89_pci_rst_bdram_be(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pcie rst bdram\n");
		return ret;
	}

	rtw89_pci_debounce_be(rtwdev);
	rtw89_pci_ldo_low_pwr_be(rtwdev);
	rtw89_pci_pcie_setting_be(rtwdev);
	rtw89_pci_ser_setting_be(rtwdev);

	rtw89_pci_ctrl_txdma_ch_be(rtwdev, false, true);
	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_ENABLE,
				      MAC_AX_PCIE_ENABLE, MAC_AX_PCIE_ENABLE);

	return 0;
}

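/* PCIe teardown run before MAC deinit: drop the wake control, and only stop
 * DMA and clear the ring indexes if the WLAN MAC is still powered.
 */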
static int rtw89_pci_ops_mac_pre_deinit_be(struct rtw89_dev *rtwdev)
{
	u32 val;

	_patch_pcie_power_wake_be(rtwdev, false);

	val = rtw89_read32_mask(rtwdev, R_BE_IC_PWR_STATE, B_BE_WLMAC_PWR_STE_MASK);
	if (val == 0)
		return 0;

	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_DISABLE,
				      MAC_AX_PCIE_DISABLE, MAC_AX_PCIE_DISABLE);
	rtw89_pci_clr_idx_all_be(rtwdev);

	return 0;
}

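/* Configure PCIe LTR (Latency Tolerance Reporting). Bail out with -EINVAL if
 * any LTR register reads back an error value; otherwise program the hardware
 * LTR decision logic, the active/idle/disable latency indexes and the RX page
 * thresholds.
 */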
int rtw89_pci_ltr_set_v2(struct rtw89_dev *rtwdev, bool en)
{
	u32 ctrl0, cfg0, cfg1, dec_ctrl, idle_ltcy, act_ltcy, dis_ltcy;

	ctrl0 = rtw89_read32(rtwdev, R_BE_LTR_CTRL_0);
	if (rtw89_pci_ltr_is_err_reg_val(ctrl0))
		return -EINVAL;
	cfg0 = rtw89_read32(rtwdev, R_BE_LTR_CFG_0);
	if (rtw89_pci_ltr_is_err_reg_val(cfg0))
		return -EINVAL;
	cfg1 = rtw89_read32(rtwdev, R_BE_LTR_CFG_1);
	if (rtw89_pci_ltr_is_err_reg_val(cfg1))
		return -EINVAL;
	dec_ctrl = rtw89_read32(rtwdev, R_BE_LTR_DECISION_CTRL_V1);
	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
		return -EINVAL;
	idle_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1);
	if (rtw89_pci_ltr_is_err_reg_val(idle_ltcy))
		return -EINVAL;
	act_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1);
	if (rtw89_pci_ltr_is_err_reg_val(act_ltcy))
		return -EINVAL;
	dis_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1);
	if (rtw89_pci_ltr_is_err_reg_val(dis_ltcy))
		return -EINVAL;

	if (en) {
		dec_ctrl |= B_BE_ENABLE_LTR_CTL_DECISION | B_BE_LTR_HW_DEC_EN_V1;
		ctrl0 |= B_BE_LTR_HW_EN;
	} else {
		dec_ctrl &= ~(B_BE_ENABLE_LTR_CTL_DECISION | B_BE_LTR_HW_DEC_EN_V1 |
			      B_BE_LTR_EN_PORT_V1_MASK);
		ctrl0 &= ~B_BE_LTR_HW_EN;
	}

	dec_ctrl = u32_replace_bits(dec_ctrl, PCI_LTR_SPC_500US,
				    B_BE_LTR_SPACE_IDX_MASK);
	cfg0 = u32_replace_bits(cfg0, PCI_LTR_IDLE_TIMER_3_2MS,
				B_BE_LTR_IDLE_TIMER_IDX_MASK);
	cfg1 = u32_replace_bits(cfg1, 0xC0, B_BE_LTR_CMAC0_RX_USE_PG_TH_MASK);
	cfg1 = u32_replace_bits(cfg1, 0xC0, B_BE_LTR_CMAC1_RX_USE_PG_TH_MASK);
	cfg0 = u32_replace_bits(cfg0, 1, B_BE_LTR_IDX_ACTIVE_MASK);
	cfg0 = u32_replace_bits(cfg0, 3, B_BE_LTR_IDX_IDLE_MASK);
	dec_ctrl = u32_replace_bits(dec_ctrl, 0, B_BE_LTR_IDX_DISABLE_V1_MASK);

	rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1, 0x90039003);
	rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1, 0x880b880b);
	rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1, 0);
	rtw89_write32(rtwdev, R_BE_LTR_DECISION_CTRL_V1, dec_ctrl);
	rtw89_write32(rtwdev, R_BE_LTR_CFG_0, cfg0);
	rtw89_write32(rtwdev, R_BE_LTR_CFG_1, cfg1);
	rtw89_write32(rtwdev, R_BE_LTR_CTRL_0, ctrl0);

	return 0;
}
EXPORT_SYMBOL(rtw89_pci_ltr_set_v2);

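/* RX interrupt mitigation: 1 ms timer unit, a packet-count threshold of half
 * an RX ring (capped at U8_MAX) and a timer count of 2.
 */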
static void rtw89_pci_configure_mit_be(struct rtw89_dev *rtwdev)
{
	u32 cnt;
	u32 val;

	rtw89_write32_mask(rtwdev, R_BE_PCIE_MIT0_TMR,
			   B_BE_PCIE_MIT0_RX_TMR_MASK, BE_MIT0_TMR_UNIT_1MS);

	val = rtw89_read32(rtwdev, R_BE_PCIE_MIT0_CNT);
	cnt = min_t(u32, U8_MAX, RTW89_PCI_RXBD_NUM_MAX / 2);
	val = u32_replace_bits(val, cnt, B_BE_PCIE_RX_MIT0_CNT_MASK);
	val = u32_replace_bits(val, 2, B_BE_PCIE_RX_MIT0_TMR_CNT_MASK);
	rtw89_write32(rtwdev, R_BE_PCIE_MIT0_CNT, val);
}

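/* PCIe setup run after the MAC is up: enable LTR, re-enable the AXI master,
 * WP DMA and all TX channels, then turn on RX interrupt mitigation.
 */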
static int rtw89_pci_ops_mac_post_init_be(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int ret;

	ret = info->ltr_set(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "pci ltr set fail\n");
		return ret;
	}

	rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_IGNORE,
				      MAC_AX_PCIE_IGNORE, MAC_AX_PCIE_ENABLE);
	rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, true);
	rtw89_pci_ctrl_txdma_ch_be(rtwdev, true, true);
	rtw89_pci_configure_mit_be(rtwdev);

	return 0;
}

static int rtw89_pci_poll_io_idle_be(struct rtw89_dev *rtwdev)
{
	u32 sts;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_read32, sts,
				       !(sts & B_BE_HAXI_MST_BUSY),
				       10, 1000, false, rtwdev,
				       R_BE_HAXI_DMA_BUSY1);
	if (ret) {
		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", sts);
		return ret;
	}

	return 0;
}

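/* SER level-1 reset, stop side: halt all DMA and poll for the AXI master to
 * go idle; if that fails, bounce the HCI DMA block once and poll again.
 */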
static int rtw89_pci_lv1rst_stop_dma_be(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_pci_ctrl_dma_all(rtwdev, false);
	ret = rtw89_pci_poll_io_idle_be(rtwdev);
	if (!ret)
		return 0;

	rtw89_debug(rtwdev, RTW89_DBG_HCI,
		    "[PCIe] poll_io_idle fail; reset hci dma trx\n");

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);

	return rtw89_pci_poll_io_idle_be(rtwdev);
}

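/* SER level-1 reset, restart side: bounce HCI DMA, clear the ring indexes,
 * reset BD RAM, then re-enable all DMA.
 */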
static int rtw89_pci_lv1rst_start_dma_be(struct rtw89_dev *rtwdev)
{
	int ret;

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
	rtw89_pci_clr_idx_all(rtwdev);

	ret = rtw89_pci_rst_bdram_be(rtwdev);
	if (ret)
		return ret;

	rtw89_pci_ctrl_dma_all(rtwdev, true);
	return 0;
}

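/* PCIe HCI ops for the BE (802.11be) chip generation, e.g. RTL8922A. */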
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
	.isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
	.isr_halt_c2h = B_BE_HALT_C2H_INT,
	.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
	.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
	.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},

	.mac_pre_init = rtw89_pci_ops_mac_pre_init_be,
	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be,
	.mac_post_init = rtw89_pci_ops_mac_post_init_be,

	.clr_idx_all = rtw89_pci_clr_idx_all_be,
	.rst_bdram = rtw89_pci_rst_bdram_be,

	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_be,
	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);