xref: /linux/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2024 Realtek Corporation
3  */
4 
5 #include "chan.h"
6 #include "coex.h"
7 #include "debug.h"
8 #include "fw.h"
9 #include "mac.h"
10 #include "phy.h"
11 #include "reg.h"
12 #include "rtw8852bt.h"
13 #include "rtw8852bt_rfk.h"
14 #include "rtw8852bt_rfk_table.h"
15 #include "rtw8852b_common.h"
16 
#define RTW8852BT_RXDCK_VER 0x1		/* RX DCK algorithm version (logged by _rx_dck) */
#define RTW8852BT_IQK_VER 0x2a		/* IQK algorithm version */
#define RTW8852BT_SS 2			/* number of spatial streams / RF paths */
#define RTW8852BT_TSSI_PATH_NR 2	/* TSSI paths (sizes the _tssi_* tables below) */
#define RTW8852BT_DPK_VER 0x06		/* DPK algorithm version */
#define DPK_RF_PATH_MAX_8852BT 2	/* RF paths covered by DPK */

#define _TSSI_DE_MASK GENMASK(21, 12)	/* TSSI DE field within its register */
#define DPK_TXAGC_LOWER 0x2e		/* DPK TXAGC search lower bound */
#define DPK_TXAGC_UPPER 0x3f		/* DPK TXAGC search upper bound */
#define DPK_TXAGC_INVAL 0xff		/* sentinel: no valid TXAGC value */
#define RFREG_MASKRXBB 0x003e0		/* RF register field mask (RX BB, per name) */
#define RFREG_MASKMODE 0xf0000		/* RF register field mask (mode, per name) */
30 
/* RF front-end operating modes; values match the hardware mode-field
 * encoding written into the RF mode register.
 */
enum rf_mode {
	RF_SHUT_DOWN = 0x0,
	RF_STANDBY = 0x1,
	RF_TX = 0x2,
	RF_RX = 0x3,
	RF_TXIQK = 0x4,		/* TX IQ calibration */
	RF_DPK = 0x5,		/* digital pre-distortion calibration */
	RF_RXK1 = 0x6,		/* RX calibration, step 1 */
	RF_RXK2 = 0x7,		/* RX calibration, step 2 */
};
41 
/* DPK one-shot command/stage IDs issued to the calibration engine.
 * The D_* entries appear to be the "direct" variants of the corresponding
 * base commands — TODO confirm against the DPK flow.
 */
enum rtw8852bt_dpk_id {
	LBK_RXIQK	= 0x06,
	SYNC		= 0x10,
	MDPK_IDL	= 0x11,
	MDPK_MPA	= 0x12,
	GAIN_LOSS	= 0x13,
	GAIN_CAL	= 0x14,
	DPK_RXAGC	= 0x15,
	KIP_PRESET	= 0x16,
	KIP_RESTORE	= 0x17,
	DPK_TXAGC	= 0x19,
	D_KIP_PRESET	= 0x28,
	D_TXAGC		= 0x29,
	D_RXAGC		= 0x2a,
	D_SYNC		= 0x2b,
	D_GAIN_LOSS	= 0x2c,
	D_MDPK_IDL	= 0x2d,
	D_GAIN_NORM	= 0x2f,
	D_KIP_THERMAL	= 0x30,
	D_KIP_RESTORE	= 0x31
};
63 
/* States of the DPK AGC (gain adjustment) state machine. */
enum dpk_agc_step {
	DPK_AGC_STEP_SYNC_DGAIN,
	DPK_AGC_STEP_GAIN_ADJ,
	DPK_AGC_STEP_GAIN_LOSS_IDX,
	DPK_AGC_STEP_GL_GT_CRITERION,	/* gain loss greater than criterion */
	DPK_AGC_STEP_GL_LT_CRITERION,	/* gain loss less than criterion */
	DPK_AGC_STEP_SET_TX_GAIN,
};
72 
/* IQK one-shot command IDs (A_* = 5 GHz band, G_* = 2.4 GHz band;
 * NB = narrow-band, FLOK = fine LO calibration — naming per convention,
 * TODO confirm).
 */
enum rtw8852bt_iqk_type {
	ID_TXAGC = 0x0,
	ID_FLOK_COARSE = 0x1,
	ID_FLOK_FINE = 0x2,
	ID_TXK = 0x3,
	ID_RXAGC = 0x4,
	ID_RXK = 0x5,
	ID_NBTXK = 0x6,
	ID_NBRXK = 0x7,
	ID_FLOK_VBUFFER = 0x8,
	ID_A_FLOK_COARSE = 0x9,
	ID_G_FLOK_COARSE = 0xa,
	ID_A_FLOK_FINE = 0xb,
	ID_G_FLOK_FINE = 0xc,
	ID_IQK_RESTORE = 0x10,
};
89 
/* ADC clock selections programmed by _rxck_force(). */
enum adc_ck {
	ADC_NA = 0,
	ADC_480M = 1,
	ADC_960M = 2,
	ADC_1920M = 3,
};

/* DAC clock selections programmed by _txck_force(). */
enum dac_ck {
	DAC_40M = 0,
	DAC_80M = 1,
	DAC_120M = 2,
	DAC_160M = 3,
	DAC_240M = 4,
	DAC_320M = 5,
	DAC_480M = 6,
	DAC_960M = 7,
};
107 
/* TSSI register address tables, indexed by RF path (entry 0 = path A,
 * entry 1 = path B).
 */
static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820};
static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18};
/* Default codeword registers; the four per-path entries pair with the
 * four masks in _tssi_cw_default_mask below.
 */
static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = {
	{0x5634, 0x5630, 0x5630, 0x5630},
	{0x7634, 0x7630, 0x7630, 0x7630} };
static const u32 _tssi_cw_default_mask[4] = {
	0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
/* Per-path TSSI DE (offset) registers by rate/bandwidth class. */
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838};
static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840};
static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848};
static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850};
static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828};
static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830};
123 
/* Register lists saved before and restored after calibration by the
 * _rfk_backup_*/_rfk_reload_* helpers below.
 */
static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704};
static const u32 rtw8852bt_backup_rf_regs[] = {
	0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005};
static const u32 rtw8852bt_backup_kip_regs[] = {
	0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec,
	0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec};

/* Entry counts for the backup arrays above (also size the caller's
 * backup-value buffers).
 */
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs)
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs)
#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs)
134 
/* Sample the RF thermal meter on @path and store the value in
 * dpk->bp[path][kidx].ther_dpk for later DPK thermal compensation.
 * The trigger bit is pulsed 1 -> 0 -> 1 to start a fresh measurement,
 * then we wait 200 us before reading the result.
 */
static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
	rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);

	udelay(200);

	dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
		    dpk->bp[path][kidx].ther_dpk);
}
150 
/* Snapshot every BB register in rtw8852bt_backup_bb_regs[] into
 * @backup_bb_reg_val (caller provides BACKUP_BB_REGS_NR entries).
 */
static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
{
	u32 idx;

	for (idx = 0; idx < BACKUP_BB_REGS_NR; idx++) {
		u32 reg = rtw8852bt_backup_bb_regs[idx];

		backup_bb_reg_val[idx] = rtw89_phy_read32_mask(rtwdev, reg,
							       MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]backup bb reg : %x, value =%x\n",
			    reg, backup_bb_reg_val[idx]);
	}
}
163 
/* Snapshot every KIP register in rtw8852bt_backup_kip_regs[] into
 * @backup_kip_reg_val (caller provides BACKUP_KIP_REGS_NR entries).
 */
static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
{
	u32 idx;

	for (idx = 0; idx < BACKUP_KIP_REGS_NR; idx++) {
		u32 reg = rtw8852bt_backup_kip_regs[idx];

		backup_kip_reg_val[idx] = rtw89_phy_read32_mask(rtwdev, reg,
								MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
			    reg, backup_kip_reg_val[idx]);
	}
}
176 
177 static
_rfk_backup_rf_reg(struct rtw89_dev * rtwdev,u32 backup_rf_reg_val[],u8 rf_path)178 void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path)
179 {
180 	u32 i;
181 
182 	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
183 		backup_rf_reg_val[i] =
184 			rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
185 				      RFREG_MASK);
186 
187 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n",
188 			    rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
189 	}
190 }
191 
_rfk_reload_bb_reg(struct rtw89_dev * rtwdev,const u32 backup_bb_reg_val[])192 static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[])
193 {
194 	u32 i;
195 
196 	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
197 		rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i],
198 				       MASKDWORD, backup_bb_reg_val[i]);
199 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
200 			    "[RFK]restore bb reg : %x, value =%x\n",
201 			    rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
202 	}
203 }
204 
/* Restore the KIP registers previously saved by _rfk_backup_kip_reg(). */
static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
{
	u32 idx;

	for (idx = 0; idx < BACKUP_KIP_REGS_NR; idx++) {
		u32 reg = rtw8852bt_backup_kip_regs[idx];

		rtw89_phy_write32_mask(rtwdev, reg, MASKDWORD,
				       backup_kip_reg_val[idx]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore kip reg : %x, value =%x\n",
			    reg, backup_kip_reg_val[idx]);
	}
}
218 
/* Restore the RF registers of @rf_path previously saved by
 * _rfk_backup_rf_reg().
 */
static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev,
			       const u32 backup_rf_reg_val[], u8 rf_path)
{
	u32 idx;

	for (idx = 0; idx < BACKUP_RF_REGS_NR; idx++) {
		u32 reg = rtw8852bt_backup_rf_regs[idx];

		rtw89_write_rf(rtwdev, rf_path, reg, RFREG_MASK,
			       backup_rf_reg_val[idx]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
			    reg, backup_rf_reg_val[idx]);
	}
}
233 
_kpath(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)234 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
235 {
236 	u8 val;
237 
238 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
239 		    rtwdev->dbcc_en, phy_idx);
240 
241 	if (!rtwdev->dbcc_en) {
242 		val = RF_AB;
243 	} else {
244 		if (phy_idx == RTW89_PHY_0)
245 			val = RF_A;
246 		else
247 			val = RF_B;
248 	}
249 	return val;
250 }
251 
252 static
_txck_force(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool force,enum dac_ck ck)253 void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
254 		 enum dac_ck ck)
255 {
256 	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
257 
258 	if (!force)
259 		return;
260 
261 	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
262 	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
263 }
264 
265 static
_rxck_force(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool force,enum adc_ck ck)266 void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
267 		 enum adc_ck ck)
268 {
269 	u32 bw = 0;
270 
271 	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
272 
273 	if (!force)
274 		return;
275 
276 	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
277 	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
278 
279 	switch (ck) {
280 	case ADC_480M:
281 		bw = RTW89_CHANNEL_WIDTH_40;
282 		break;
283 	case ADC_960M:
284 		bw = RTW89_CHANNEL_WIDTH_80;
285 		break;
286 	case ADC_1920M:
287 		bw = RTW89_CHANNEL_WIDTH_160;
288 		break;
289 	default:
290 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__);
291 		break;
292 	}
293 
294 	rtw8852bx_adc_cfg(rtwdev, bw, path);
295 }
296 
/* Force the BB/AFE blocks into the fixed state required while an RF
 * calibration runs: enable ADC fifos and forced clocks, assert path
 * resets, disable CCA/PD hits, then force 960M DAC / 1920M ADC clocks
 * on both paths. Undone by _rfk_bb_afe_restore().
 */
static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);

	/* Both paths get the calibration clocks regardless of @kpath. */
	_txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
	_txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	/* Analog power-up sequence; the 1 us delays are required settle times. */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333);

	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000);
}
340 
/* Undo _rfk_bb_afe_setting(): release all forced clocks, fifos and
 * debug modes so the BB/AFE returns to normal operation after RFK.
 */
static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, u8 kpath)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);

	/* Release TX power forcing; the 0x1 -> 0x2 writes pulse the
	 * per-path TX power reset strobes.
	 */
	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2);
}
369 
/* Trigger one RX DC offset calibration on @path: clear the DCK result
 * latch, then pulse RR_DCK_LV 0 -> 1 to start the calibration and allow
 * 1 ms for it to complete (no done-flag is polled here).
 */
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
	mdelay(1);
}
378 
/* Run RX DC offset calibration (RX DCK) on every RF path.
 *
 * Per path: save RR_RSV1 and the DCK fine-tune code, switch the RF into
 * RX mode, run _set_rx_dck(), then restore both saved values.  When the
 * path is in TSSI mode, B_P0_TSSI_TRK_EN is written 0x1 before and 0x0
 * after the calibration — NOTE(review): polarity suggests 0x1 pauses
 * tracking during the cal; confirm against the TSSI code.
 */
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 path, dck_tune;
	u32 rf_reg5;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
		    RTW8852BT_RXDCK_VER, rtwdev->hal.cv);

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
		dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x1);

		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
		_set_rx_dck(rtwdev, phy, path);
		rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

		if (rtwdev->is_tssi_mode[path])
			rtw89_phy_write32_mask(rtwdev,
					       R_P0_TSSI_TRK + (path << 13),
					       B_P0_TSSI_TRK_EN, 0x0);
	}
}
410 
/* Run RF RC calibration (RCK) on @path.
 *
 * The RF is put into RX mode (RR_RSV1 saved/restored around the cal),
 * the calibration is kicked by writing 0x00240 to RR_RCKC, then we poll
 * RR_RCKS bit 3 (up to 30 us, 2 us steps) for completion.  The resulting
 * CA code is read back and rewritten into RR_RCKC to latch it.  A poll
 * timeout is only logged; the read-back value is used regardless.
 */
static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
{
	u32 rf_reg5;
	u32 rck_val;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);

	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));

	/* RCK trigger */
	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);

	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
				       false, rtwdev, path, RR_RCKS, BIT(3));

	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
		    rck_val, ret);

	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
}
445 
/* Run the D-die RC calibration (DRCK): start it via B_DRCK_EN, poll
 * B_DRCK_POL for completion (up to 10 ms), pulse B_DRCK_LAT to latch
 * the result, then copy the 5-bit result field (0x7c00 of R_DRCK_RES)
 * into B_DRCK_VAL as the manual setting.  A timeout is only logged.
 */
static void _drck(struct rtw89_dev *rtwdev)
{
	u32 rck_d;
	u32 val;
	int ret;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_DRCK_RES, B_DRCK_POL);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");

	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);

	rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
473 
/* Read back the S0 (path A) DACK results into rtwdev->dack: the 16 MSBK
 * codes for each of the two converters (selected one at a time via the
 * DCOF index fields) plus the bias and DA-DCK codes.  Requires BB debug
 * mode (B_P0_NRBW_DBG), which this function enables but does not clear.
 */
static void _dack_backup_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);

	for (i = 0; i < 0x10; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
		dack->msbk_d[0][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);

		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
		dack->msbk_d[0][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
	}

	dack->biask_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
	dack->biask_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);

	dack->dadck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
	dack->dadck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
}
501 
/* S1 (path B) counterpart of _dack_backup_s0(): read back the 16 MSBK
 * codes per converter plus bias and DA-DCK codes into rtwdev->dack.
 * Requires P1 debug mode (B_P1_DBGMOD_ON), enabled here but not cleared.
 */
static void _dack_backup_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);

	for (i = 0; i < 0x10; i++) {
		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
		dack->msbk_d[1][0][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);

		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
		dack->msbk_d[1][1][i] =
			rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
	}

	dack->biask_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
	dack->biask_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);

	dack->dadck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
	dack->dadck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
}
529 
530 static
_dack_reset(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)531 void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
532 {
533 	if (path == RF_PATH_A) {
534 		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
535 		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
536 	} else {
537 		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0);
538 		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1);
539 	}
540 }
541 
542 static
_dack_reload_by_path(struct rtw89_dev * rtwdev,u8 path,u8 index)543 void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index)
544 {
545 	struct rtw89_dack_info *dack = &rtwdev->dack;
546 	u32 tmp, tmp_offset, tmp_reg;
547 	u32 idx_offset, path_offset;
548 	u8 i;
549 
550 	if (index == 0)
551 		idx_offset = 0;
552 	else
553 		idx_offset = 0x14;
554 
555 	if (path == RF_PATH_A)
556 		path_offset = 0;
557 	else
558 		path_offset = 0x28;
559 
560 	tmp_offset = idx_offset + path_offset;
561 
562 	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
563 	rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
564 	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1);
565 	rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1);
566 
567 	/* msbk_d: 15/14/13/12 */
568 	tmp = 0x0;
569 	for (i = 0; i < 4; i++)
570 		tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
571 	tmp_reg = 0xc200 + tmp_offset;
572 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
573 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
574 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
575 
576 	/* msbk_d: 11/10/9/8 */
577 	tmp = 0x0;
578 	for (i = 0; i < 4; i++)
579 		tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
580 	tmp_reg = 0xc204 + tmp_offset;
581 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
582 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
583 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
584 
585 	/* msbk_d: 7/6/5/4 */
586 	tmp = 0x0;
587 	for (i = 0; i < 4; i++)
588 		tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
589 	tmp_reg = 0xc208 + tmp_offset;
590 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
591 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
592 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
593 
594 	/* msbk_d: 3/2/1/0 */
595 	tmp = 0x0;
596 	for (i = 0; i < 4; i++)
597 		tmp |= dack->msbk_d[path][index][i] << (i * 8);
598 	tmp_reg = 0xc20c + tmp_offset;
599 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
600 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
601 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
602 
603 	/* dadak_d/biask_d */
604 	tmp = (dack->biask_d[path][index] << 22) |
605 	      (dack->dadck_d[path][index] << 14);
606 	tmp_reg = 0xc210 + tmp_offset;
607 	rtw89_phy_write32(rtwdev, tmp_reg, tmp);
608 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
609 		    rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
610 
611 	/* enable DACK result from reg */
612 	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1);
613 }
614 
615 static
_dack_reload(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)616 void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
617 {
618 	u8 i;
619 
620 	for (i = 0; i < 2; i++)
621 		_dack_reload_by_path(rtwdev, path, i);
622 }
623 
_dack_s0_poll(struct rtw89_dev * rtwdev)624 static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
625 {
626 	if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
627 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
628 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
629 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
630 		return false;
631 
632 	return true;
633 }
634 
/* Run DAC calibration (DACK) on S0 (path A): force a 160 MHz DAC clock,
 * power the analog blocks, reset and start the engine, poll the four
 * done flags for up to 20 ms, then release the clock force, back up the
 * results and reload them into the KIP registers.  A timeout is recorded
 * in dack->msbk_timeout[0] and the flow continues regardless.
 */
static void _dack_s0(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
	udelay(100);
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30);
	rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30);

	_dack_reset(rtwdev, RF_PATH_A);

	/* start the calibration */
	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
	udelay(1);

	dack->msbk_timeout[0] = false;

	ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
				       1, 20000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
		dack->msbk_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);

	_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
	_dack_backup_s0(rtwdev);
	_dack_reload(rtwdev, RF_PATH_A);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
}
672 
_dack_s1_poll(struct rtw89_dev * rtwdev)673 static bool _dack_s1_poll(struct rtw89_dev *rtwdev)
674 {
675 	if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
676 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
677 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
678 	    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
679 		return false;
680 
681 	return true;
682 }
683 
/* S1 (path B) counterpart of _dack_s0(): force a 160 MHz DAC clock, run
 * the DACK engine, poll the done flags for up to 10 ms, then back up and
 * reload the results.  A timeout is recorded in dack->msbk_timeout[1]
 * and the flow continues regardless.
 */
static void _dack_s1(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	bool done;
	int ret;

	_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
	udelay(100);
	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30);
	rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30);

	_dack_reset(rtwdev, RF_PATH_B);

	/* start the calibration */
	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
	udelay(1);

	dack->msbk_timeout[1] = false;

	ret = read_poll_timeout_atomic(_dack_s1_poll, done, done,
				       1, 10000, false, rtwdev);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
		dack->msbk_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);

	_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
	_dack_backup_s1(rtwdev);
	_dack_reload(rtwdev, RF_PATH_B);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
721 
/* Run DAC calibration on both paths, S0 then S1. */
static void _dack(struct rtw89_dev *rtwdev)
{
	_dack_s0(rtwdev);
	_dack_s1(rtwdev);
}
727 
_dack_dump(struct rtw89_dev * rtwdev)728 static void _dack_dump(struct rtw89_dev *rtwdev)
729 {
730 	struct rtw89_dack_info *dack = &rtwdev->dack;
731 	u8 i;
732 	u8 t;
733 
734 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
735 		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
736 		    dack->addck_d[0][0], dack->addck_d[0][1]);
737 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
738 		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
739 		    dack->addck_d[1][0], dack->addck_d[1][1]);
740 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
741 		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
742 		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
743 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
744 		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
745 		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
746 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
747 		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
748 		    dack->biask_d[0][0], dack->biask_d[0][1]);
749 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
750 		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
751 		    dack->biask_d[1][0], dack->biask_d[1][1]);
752 
753 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
754 	for (i = 0; i < 0x10; i++) {
755 		t = dack->msbk_d[0][0][i];
756 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
757 	}
758 
759 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
760 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
761 		t = dack->msbk_d[0][1][i];
762 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
763 	}
764 
765 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
766 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
767 		t = dack->msbk_d[1][0][i];
768 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
769 	}
770 
771 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
772 	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
773 		t = dack->msbk_d[1][1][i];
774 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
775 	}
776 }
777 
/* Run ADC DC offset calibration (ADDCK) on both paths and store the raw
 * results in dack->addck_d[path][0/1].
 *
 * Per path: disable manual mode, power-sequence the analog blocks,
 * pulse the trigger bit, start the cal and poll the done bit (up to
 * 10 ms, timeout recorded in dack->addck_timeout[path] and flow
 * continues), then restore the analog settings and read back the two
 * result fields.  S0 is handled first, then the same sequence for S1.
 */
static void _addck_ori(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;
	u32 val;
	int ret;

	/* ---- S0 (path A) ---- */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
	dack->addck_timeout[0] = false;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR0, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
		dack->addck_timeout[0] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
	dack->addck_d[0][0] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
	dack->addck_d[0][1] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);

	/* ---- S1 (path B), same sequence with the P1/ADDCK1 registers ---- */
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
	udelay(1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
	dack->addck_timeout[1] = false;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
				       1, 10000, false,
				       rtwdev, R_ADDCKR1, BIT(0));
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
		dack->addck_timeout[1] = true;
	}

	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);

	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
	dack->addck_d[1][0] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
	dack->addck_d[1][1] =
		rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);

	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
}
863 
/* Reload the previously measured ADC DC-offset (ADDCK) compensation values
 * from SW state (rtwdev->dack) into the path 0/1 reload registers, then
 * enable the manual-reload source for both paths.
 */
static void _addck_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	/* path 0: addck_d[0][0] (captured from ADDCKR0_A0) goes to the RL1
	 * field and addck_d[0][1] (from ADDCKR0_A1) to RL0 — cross order as
	 * captured by the measurement flow; TODO confirm intended mapping */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);

	/* select manual reload for path 0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);

	/* path 1: same cross-ordered mapping as path 0 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]);

	/* select manual reload for path 1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
}
878 
/* Disable the manual ADDCK reload source on both paths and turn off all
 * four DACK control blocks, so a fresh calibration can run.
 */
static void _dack_manual_off(struct rtw89_dev *rtwdev)
{
	/* deselect manual ADDCK reload on path 0/1 */
	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0);

	/* switch off DACK blocks 0-3 */
	rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0);
}
889 
/* Top-level DAC calibration (DACK) flow: DRCK first, then ADC DC-offset
 * calibration (ADDCK) with both RX ADC clocks forced to 960M, then the DACK
 * itself.  Sets dack->dack_done and bumps dack->dack_cnt on completion.
 * NOTE(review): @force is never read in this body — presumably kept for API
 * symmetry with other chip variants; confirm before removing.
 */
static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
{
	struct rtw89_dack_info *dack = &rtwdev->dack;

	dack->dack_done = false;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");

	_drck(rtwdev);
	_dack_manual_off(rtwdev);
	/* RSV1 cleared for the duration of calibration (restored below);
	 * RR_MOD = 0x337e1 is a vendor-provided calibration mode word */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
	/* ADDCK measurement requires the RX clock forced to 960M */
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
	_addck_ori(rtwdev);

	/* release the forced clock and load the measured offsets back */
	_rxck_force(rtwdev, RF_PATH_A, false, ADC_960M);
	_rxck_force(rtwdev, RF_PATH_B, false, ADC_960M);
	_addck_reload(rtwdev);

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);

	_dack(rtwdev);
	_dack_dump(rtwdev);
	dack->dack_done = true;
	/* restore RSV1 after calibration */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1);

	dack->dack_cnt++;
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
}
923 
_iqk_check_cal(struct rtw89_dev * rtwdev,u8 path,u8 ktype)924 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
925 {
926 	bool notready = false;
927 	u32 val;
928 	int ret;
929 
930 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
931 				       10, 8200, false,
932 				       rtwdev, R_RFK_ST, MASKBYTE0);
933 	if (ret)
934 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");
935 
936 	udelay(10);
937 
938 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
939 				       10, 400, false,
940 				       rtwdev, R_RPT_COM, B_RPT_COM_RDY);
941 	if (ret) {
942 		notready = true;
943 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n");
944 	}
945 
946 	udelay(10);
947 	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
948 
949 	return notready;
950 }
951 
_iqk_one_shot(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path,u8 ktype)952 static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
953 			  u8 path, u8 ktype)
954 {
955 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
956 	u32 iqk_cmd;
957 	bool fail;
958 
959 	switch (ktype) {
960 	case ID_TXAGC:
961 		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
962 		break;
963 	case ID_FLOK_COARSE:
964 		iqk_cmd = 0x108 | (1 << (4 + path));
965 		break;
966 	case ID_FLOK_FINE:
967 		iqk_cmd = 0x208 | (1 << (4 + path));
968 		break;
969 	case ID_FLOK_VBUFFER:
970 		iqk_cmd = 0x308 | (1 << (4 + path));
971 		break;
972 	case ID_TXK:
973 		iqk_cmd = 0x008 | (1 << (path + 4)) |
974 			  (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
975 		break;
976 	case ID_RXAGC:
977 		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
978 		break;
979 	case ID_RXK:
980 		iqk_cmd = 0x008 | (1 << (path + 4)) |
981 			  (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
982 		break;
983 	case ID_NBTXK:
984 		iqk_cmd = 0x408 | (1 << (4 + path));
985 		break;
986 	case ID_NBRXK:
987 		iqk_cmd = 0x608 | (1 << (4 + path));
988 		break;
989 	default:
990 		return false;
991 	}
992 
993 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n",
994 		    __func__, iqk_cmd + 1);
995 
996 	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
997 	fail = _iqk_check_cal(rtwdev, path, ktype);
998 
999 	return fail;
1000 }
1001 
/* Program the per-band RF front-end for TX-side IQK/LOK: TX attenuation,
 * LOK LUT entry selection, the IQK mode word (RR_MOD_IQK = 0x403e) and the
 * TX input gain ranges.  Bands other than 2G/5G are left untouched.
 */
static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	switch (iqk_info->iqk_band[path]) {
	case RTW89_BAND_2G:
		/* 2G: zero TX attenuation, LUT entry 0x00 */
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5);
		udelay(1);
		break;
	case RTW89_BAND_5G:
		/* 5G: bias enable, LUT entry 0x80, smaller tracking range */
		rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
		udelay(1);
		break;
	default:
		break;
	}
}
1033 
/* 2 GHz LO-leakage calibration (LOK).  Runs four NCTL one-shot rounds,
 * alternating TX gain 0x0/0x12 with commands 0x119/0x319/0x219/0x319;
 * per the encoding in _iqk_one_shot() these are COARSE, VBUFFER, FINE,
 * VBUFFER.  Completion is polled via _iqk_check_cal() each round (its
 * ktype argument is unused there, so passing ID_FLOK_COARSE for the 0x219
 * round is harmless — TODO confirm intent).  Always returns false;
 * @phy_idx is unused.
 */
static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* round 1: TX gain 0x0, FLOK coarse (cmd 0x119) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: TX gain 0x12, vbuffer (cmd 0x319) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 3: TX gain 0x0, FLOK fine (cmd 0x219) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 4: TX gain 0x12, vbuffer again (cmd 0x319) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);

	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return false;
}
1078 
/* 5 GHz LO-leakage calibration (LOK).  Same four-round NCTL sequence as
 * the 2 GHz variant (coarse 0x119, vbuffer 0x319, fine 0x219, vbuffer
 * 0x319), alternating TX gain 0x0 and 0x12.  Always returns false;
 * @phy_idx is unused.
 */
static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	/* round 1: TX gain 0x0, FLOK coarse (cmd 0x119) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 2: TX gain 0x12, vbuffer (cmd 0x319) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 3: TX gain 0x0, FLOK fine (cmd 0x219; _iqk_check_cal()
	 * ignores its ktype argument, so passing COARSE here is harmless) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	/* round 4: TX gain 0x12, vbuffer again (cmd 0x319) */
	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));

	_iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	return false;
}
1122 
/* 2 GHz TX IQK: sweep four TX gain groups; for each, run a narrow-band TXK
 * one-shot, then (unless narrow-band-only mode is set) a full TXK one-shot.
 * Only the last one-shot's "not ready" status is kept; when it completed,
 * the NCTL report flag decides kfail.  On failure the stored TX CFIR is
 * forced to the pass-through value 0x40000002 and the HW TXCFIR is cleared.
 * Returns true on failure.
 */
static bool _iqk_2g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	/* per-group TX gain candidates: power range, tracking range,
	 * BB gain and I/Q swing for gp = 0..3 */
	static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0};
	static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6};
	static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e};
	static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready = false;
	bool kfail = false;
	u8 gp;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);

	for (gp = 0x0; gp < 0x4; gp++) {
		/* apply this group's TX gain settings */
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
			       g_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
			       g_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
			       g_gain_bb[gp]);
		/* select CFIR LUT group gp for this path */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000100, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000010, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000004, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000003, gp);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x009);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, g_itqt[gp]);
		/* narrow-band TXK first; capture the resulting CFIR */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);

		if (iqk_info->is_nbiqk)
			break;

		/* full (wide-band) TXK for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, g_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
			    path, gp, 1 << path, iqk_info->nb_txcfir[path]);
	}

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		/* fall back to the pass-through TX CFIR coefficient */
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
	}

	return kfail;
}
1183 
/* 5 GHz TX IQK: same structure as _iqk_2g_tx() with 5G gain tables.
 * Sweeps four TX gain groups, runs NBTXK then (unless narrow-band-only)
 * full TXK per group, checks the NCTL report flag, and on failure forces
 * the pass-through TX CFIR.  Returns true on failure.
 */
static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	/* per-group TX gain candidates for gp = 0..3 */
	static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0};
	static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6};
	static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e};
	static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool notready = false;
	bool kfail = false;
	u8 gp;

	for (gp = 0x0; gp < 0x4; gp++) {
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);

		/* NOTE(review): a_itqt is written via MASKDWORD here and
		 * again via B_KIP_IQP_IQSW below — presumably intentional in
		 * the vendor flow; confirm before deduplicating */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       MASKDWORD, a_itqt[gp]);
		/* select CFIR LUT group gp for this path */
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000100, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000010, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000004, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
				       0x00000003, gp);
		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
				       0x009);
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, a_itqt[gp]);

		/* narrow-band TXK first; capture the resulting CFIR */
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
		iqk_info->nb_txcfir[path] =
			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);

		if (iqk_info->is_nbiqk)
			break;

		/* full (wide-band) TXK for this group */
		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
				       B_KIP_IQP_IQSW, a_itqt[gp]);
		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
			    path, gp, 1 << path, iqk_info->nb_txcfir[path]);
	}

	if (!notready)
		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);

	if (kfail) {
		/* fall back to the pass-through TX CFIR coefficient */
		iqk_info->nb_txcfir[path] = 0x40000002;
		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
				       B_IQK_RES_TXCFIR, 0x0);
	}

	return kfail;
}
1243 
/* Pulse the ADC FIFO reset for both paths: hold the FIFOs in reset
 * (0x0303), wait 10 us, then release (0x3333).  @phy_idx and @path are
 * unused; the write covers both paths at once.
 */
static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev,
			      enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1251 
/* Configure the RX ADC clock chain for RX IQK on both paths.  80 MHz
 * channels force the 960M ADC clock with one set of sampling delays and
 * filter bandwidths; narrower channels use 480M with another.  Ends with a
 * common sampling-delay / DRCK / ANAPAR power-up sequence and releases the
 * ADC FIFOs.
 */
static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
	/* hold the ADC FIFOs in reset while reclocking */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);

	if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
		/* 80 MHz: 960M ADC clock on both paths */
		_rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
		_rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
		udelay(1);

		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_VAL, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
				       B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
				       B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
				       B_PATH1_BW_SEL_MSK_V1, 0x8);
	} else {
		/* <= 40 MHz: 480M ADC clock on both paths */
		_rxck_force(rtwdev, RF_PATH_A, true, ADC_480M);
		_rxck_force(rtwdev, RF_PATH_B, true, ADC_480M);
		udelay(1);

		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_ON, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
				       B_UPD_CLK_ADC_VAL, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
				       B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
				       B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf);
		rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
				       B_PATH1_BW_SEL_MSK_V1, 0xf);
	}

	/* common sampling-delay fields and debug-mode enables */
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(1);
	/* ANAPAR power-up sequence, with settle delays between steps */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
	udelay(1);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
	/* release the ADC FIFOs */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1309 
_iqk_2g_rx(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path)1310 static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1311 {
1312 	static const u32 g_idxrxgain[2] = {0x212, 0x310};
1313 	static const u32 g_idxattc2[2] = {0x00, 0x20};
1314 	static const u32 g_idxattc1[2] = {0x3, 0x2};
1315 	static const u32 g_idxrxagc[2] = {0x0, 0x2};
1316 	static const u32 g_idx[2] = {0x0, 0x2};
1317 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1318 	bool notready = false;
1319 	bool kfail = false;
1320 	u32 rf_18, tmp;
1321 	u8 gp;
1322 
1323 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1324 
1325 	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
1326 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
1327 	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1328 	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
1329 
1330 	for (gp = 0x0; gp < 0x2; gp++) {
1331 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
1332 		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]);
1333 		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]);
1334 
1335 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1336 				       0x00000100, 0x1);
1337 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1338 				       0x00000010, 0x0);
1339 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1340 				       0x00000007, g_idx[gp]);
1341 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1342 		udelay(100);
1343 		udelay(100);
1344 
1345 		tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
1346 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
1347 		rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
1348 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1349 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
1350 
1351 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
1352 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1353 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb  = %x\n", path,
1354 			    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
1355 
1356 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1357 		udelay(100);
1358 		udelay(100);
1359 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1360 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
1361 
1362 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
1363 		iqk_info->nb_rxcfir[path] =
1364 			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1365 					      MASKDWORD) | 0x2;
1366 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1367 			    "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path,
1368 			    g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1369 
1370 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1371 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1372 
1373 		if (iqk_info->is_nbiqk)
1374 			break;
1375 
1376 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1377 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
1378 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1,  B_NCTL_N1_CIP, 0x00);
1379 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1380 	}
1381 
1382 	if (!notready)
1383 		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1384 
1385 	if (kfail) {
1386 		iqk_info->nb_txcfir[path] = 0x40000002;
1387 		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1388 				       B_IQK_RES_RXCFIR, 0x0);
1389 	}
1390 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
1391 
1392 	return kfail;
1393 }
1394 
_iqk_5g_rx(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,u8 path)1395 static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1396 {
1397 	static const u32 a_idxrxgain[2] = {0x110, 0x290};
1398 	static const u32 a_idxattc2[2] = {0x0f, 0x0f};
1399 	static const u32 a_idxattc1[2] = {0x2, 0x2};
1400 	static const u32 a_idxrxagc[2] = {0x4, 0x6};
1401 	static const u32 a_idx[2] = {0x0, 0x2};
1402 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1403 	bool notready = false;
1404 	bool kfail = false;
1405 	u32 rf_18, tmp;
1406 	u8 gp;
1407 
1408 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1409 
1410 	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
1411 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
1412 	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
1413 	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
1414 
1415 	for (gp = 0x0; gp < 0x2; gp++) {
1416 		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
1417 		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]);
1418 		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]);
1419 
1420 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1421 				       0x00000100, 0x1);
1422 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1423 				       0x00000010, 0x0);
1424 		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
1425 				       0x00000007, a_idx[gp]);
1426 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1427 		udelay(100);
1428 		udelay(100);
1429 
1430 		tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
1431 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
1432 		rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
1433 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1434 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
1435 
1436 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
1437 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1438 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb  = %x\n", path,
1439 			    rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
1440 
1441 		rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
1442 		udelay(200);
1443 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1444 		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
1445 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
1446 		iqk_info->nb_rxcfir[path] =
1447 			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
1448 					      MASKDWORD) | 0x2;
1449 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1450 			    "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n",
1451 			    path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
1452 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1453 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1454 
1455 		if (iqk_info->is_nbiqk)
1456 			break;
1457 
1458 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
1459 		notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
1460 		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1461 		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
1462 	}
1463 
1464 	if (!notready)
1465 		kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
1466 
1467 	if (kfail) {
1468 		iqk_info->nb_txcfir[path] = 0x40000002;
1469 		rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
1470 				       B_IQK_RES_RXCFIR, 0x0);
1471 	}
1472 	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
1473 
1474 	return kfail;
1475 }
1476 
/* Run the complete per-path IQK flow: LOK (up to three attempts), then TX
 * IQK, RX-clock reconfiguration plus ADC FIFO reset, and finally RX IQK.
 * Results are reported via debug log only.
 */
static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	bool is_2g = iqk_info->iqk_band[path] == RTW89_BAND_2G;
	bool lok_result = false;
	bool txk_result = false;
	bool rxk_result = false;
	u8 attempt;

	/* LOK: retry up to three times, stop at the first success */
	for (attempt = 0; attempt < 3; attempt++) {
		_iqk_txk_setting(rtwdev, path);
		if (is_2g)
			lok_result = _iqk_2g_lok(rtwdev, phy_idx, path);
		else
			lok_result = _iqk_5g_lok(rtwdev, phy_idx, path);
		if (!lok_result)
			break;
	}

	/* every attempt failed: program fixed fallback LOK codewords */
	if (lok_result) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n");
		rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
		rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200);
	}

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK));
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n",
		    rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK));

	/* TX IQK */
	txk_result = is_2g ? _iqk_2g_tx(rtwdev, phy_idx, path) :
			     _iqk_5g_tx(rtwdev, phy_idx, path);

	/* reclock the RX chain and reset the ADC FIFOs before RX IQK */
	_iqk_rxclk_setting(rtwdev, path);
	_iqk_adc_fifo_rst(rtwdev, phy_idx, path);

	/* RX IQK */
	rxk_result = is_2g ? _iqk_2g_rx(rtwdev, phy_idx, path) :
			     _iqk_5g_rx(rtwdev, phy_idx, path);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]result  : lok_= %x, txk_= %x, rxk_= %x\n",
		    lok_result, txk_result, rxk_result);
}
1528 
/* Snapshot the current channel parameters (band, bandwidth, channel) for
 * @path into the IQK bookkeeping, indexed by the active MCC table slot,
 * and dump the relevant RF/BB state to the debug log.  @phy is unused.
 */
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
			     enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u8 tbl_idx = rfk_mcc->table_idx;
	u32 rf18_val, bb_35c;

	rf18_val = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
	bb_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);

	/* record the channel this calibration run applies to */
	iqk_info->iqk_band[path] = chan->band_type;
	iqk_info->iqk_bw[path] = chan->band_width;
	iqk_info->iqk_ch[path] = chan->channel;
	iqk_info->iqk_mcc_ch[tbl_idx][path] = chan->channel;
	iqk_info->iqk_table_idx[path] = tbl_idx;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
		    path, rf18_val, tbl_idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
		    path, rf18_val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n",
		    path, bb_35c);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
		    iqk_info->iqk_times, tbl_idx);
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
		    tbl_idx, path, iqk_info->iqk_mcc_ch[tbl_idx][path]);
}
1559 
/* Run the full per-path IQK flow (thin wrapper around _iqk_by_path()). */
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
{
	_iqk_by_path(rtwdev, phy_idx, path);
}
1564 
/* Restore KIP/RF state after IQK on @path: apply either the stored
 * narrow-band CFIRs or the default 0x40000000 coefficient, issue the KIP
 * restore NCTL command (0xe19-based) and wait for it, clear NCTL/KIP
 * state, and return the RF path to its normal settings.
 */
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);

	if (iqk_info->is_nbiqk) {
		/* narrow-band mode: apply the captured CFIR results */
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_txcfir[path]);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, iqk_info->nb_rxcfir[path]);
	} else {
		/* otherwise write the default CFIR coefficient */
		rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
		rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
				       MASKDWORD, 0x40000000);
	}
	/* KIP restore one-shot; ktype 0x0 is ignored by _iqk_check_cal() */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
			       0x00000e19 + (path << 4));

	_iqk_check_cal(rtwdev, path, 0x0);

	/* clear NCTL/KIP bookkeeping */
	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0);

	/* NOTE(review): LUTWE_LOK is cleared twice back-to-back in the
	 * vendor flow — presumably intentional; confirm before removing */
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
}
1601 
/* Undo the AFE/BB overrides applied by _iqk_macbb_setting(): release
 * forced clocks, debug modes and ADC FIFO settings so normal operation
 * resumes. The writes mirror the setting sequence with cleared values.
 */
static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03);
	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
			       B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
}
1626 
/* Preset KIP registers for IQK on one path: select the MCC table index,
 * reset the TX/RX CFIRs to the 0x40000000 default, take the RF path out
 * of reset/BBDC, and enable the KIP report/system config.
 */
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	u8 idx = rfk_mcc->table_idx;

	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000);

	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
	rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
}
1642 
/* Force the MAC/BB/AFE into the clocking and debug configuration IQK
 * needs on both paths: enable ADC FIFOs, force TX/RX clocks (TX DAC at
 * 960 MHz, RX ADC at 1920 MHz), disable PD/CCA, and power-cycle the
 * analog blocks. Reversed by _iqk_afebb_restore().
 */
static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx, u8 path)
{
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
	/* NOTE(review): P1 register written with the P0 mask — the restore
	 * path uses B_P1_CLKG_FORCE; confirm the masks are equivalent.
	 */
	rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P0_CLKG_FORCE, 0x3);
	rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
	rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
	rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN,  0x3);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);

	_txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
	_txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
	_rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
	_rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);

	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2);

	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
	udelay(10);
	/* Analog power sequencing with settle delays between steps. */
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
	udelay(10);
	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
}
1685 
_iqk_init(struct rtw89_dev * rtwdev)1686 static void _iqk_init(struct rtw89_dev *rtwdev)
1687 {
1688 	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1689 	u8 idx, path;
1690 
1691 	rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
1692 
1693 	if (iqk_info->is_iqk_init)
1694 		return;
1695 
1696 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1697 	iqk_info->is_iqk_init = true;
1698 	iqk_info->is_nbiqk = false;
1699 	iqk_info->iqk_fft_en = false;
1700 	iqk_info->iqk_sram_en = false;
1701 	iqk_info->iqk_cfir_en = false;
1702 	iqk_info->iqk_xym_en = false;
1703 	iqk_info->iqk_times = 0x0;
1704 
1705 	for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
1706 		iqk_info->iqk_channel[idx] = 0x0;
1707 		for (path = 0; path < RTW8852BT_SS; path++) {
1708 			iqk_info->lok_cor_fail[idx][path] = false;
1709 			iqk_info->lok_fin_fail[idx][path] = false;
1710 			iqk_info->iqk_tx_fail[idx][path] = false;
1711 			iqk_info->iqk_rx_fail[idx][path] = false;
1712 			iqk_info->iqk_mcc_ch[idx][path] = 0x0;
1713 			iqk_info->iqk_table_idx[path] = 0x0;
1714 		}
1715 	}
1716 }
1717 
/* Poll every path selected in the @kpath bitmap until its RF mode leaves
 * TX (RR_MOD != RF_TX == 2), waiting up to 5 ms per path in 2 us steps.
 * A timeout is only logged, not propagated.
 */
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
	u32 rf_mode;
	u8 path;
	int ret;

	for (path = 0; path < RF_PATH_MAX; path++) {
		if (!(kpath & BIT(path)))
			continue;

		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
					       rf_mode != 2, 2, 5000, false,
					       rtwdev, path, RR_MOD, RR_MOD_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
	}
}
1735 
/* When pausing TX on @band_idx, wait until the affected paths have left
 * TX mode; resuming (@is_pause == false) requires no action here.
 */
static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
			   bool is_pause)
{
	if (is_pause)
		_wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
}
1744 
/* Run one complete IQK pass for a single RF path.
 *
 * Sequence: notify BTC, snapshot channel info, back up BB/RF registers,
 * force the MAC/BB calibration setup, preset KIP, run the calibration,
 * then restore everything in reverse order and notify BTC again.
 * The @force flag is currently unused here and passed for interface
 * symmetry with _iqk().
 */
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
		   enum rtw89_phy_idx phy_idx, u8 path,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[IQK]==========IQK start!!!!!==========\n");
	iqk_info->iqk_times++;
	iqk_info->version = RTW8852BT_IQK_VER;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
	_iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);

	/* Back up, calibrate, then restore in strict reverse order. */
	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
	_iqk_macbb_setting(rtwdev, phy_idx, path);
	_iqk_preset(rtwdev, path);
	_iqk_start_iqk(rtwdev, phy_idx, path);
	_iqk_restore(rtwdev, path);
	_iqk_afebb_restore(rtwdev, phy_idx, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
1776 
/* Dispatch IQK to the path(s) selected for @phy_idx: path A, path B, or
 * both (A first). Any other kpath value performs no calibration.
 */
static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
		 enum rtw89_chanctx_idx chanctx_idx)
{
	u8 kpath = _kpath(rtwdev, phy_idx);

	if (kpath == RF_A || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);

	if (kpath == RF_B || kpath == RF_AB)
		_doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
}
1797 
/* Enable or disable the DPK result for one path.
 *
 * The DPD output is enabled (BIT(24) of the per-path/per-index DPD
 * register set) only when DPK is globally enabled, @off is false, and
 * the stored calibration for the current index succeeded.
 *
 * Note: the original code computed @val twice (the first assignment was
 * a dead store) and expanded !@off through an if/else; both collapsed
 * here with identical resulting behavior.
 */
static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 kidx = dpk->cur_idx[path];
	bool enable;

	enable = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
			       BIT(24), enable);

	/* Debug reports the on/off request regardless of path_ok. */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
		    kidx, str_enable_disable(dpk->is_dpk_enable && !off));
}
1819 
_dpk_one_shot(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,enum rtw8852bt_dpk_id id)1820 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1821 			  enum rtw89_rf_path path, enum rtw8852bt_dpk_id id)
1822 {
1823 	u16 dpk_cmd;
1824 	u32 val;
1825 	int ret;
1826 
1827 	dpk_cmd = (id << 8) | (0x19 + (path << 4));
1828 	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1829 
1830 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1831 				       1, 30000, false,
1832 				       rtwdev, R_RFK_ST, MASKBYTE0);
1833 	if (ret)
1834 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n");
1835 
1836 	udelay(1);
1837 	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
1838 
1839 	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
1840 				       1, 2000, false,
1841 				       rtwdev, R_RPT_COM, MASKLWORD);
1842 	if (ret)
1843 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n");
1844 
1845 	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
1846 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1847 		    "[DPK] one-shot for %s = 0x%04x\n",
1848 		    id == 0x06 ? "LBK_RXIQK" :
1849 		    id == 0x10 ? "SYNC" :
1850 		    id == 0x11 ? "MDPK_IDL" :
1851 		    id == 0x12 ? "MDPK_MPA" :
1852 		    id == 0x13 ? "GAIN_LOSS" :
1853 		    id == 0x14 ? "PWR_CAL" :
1854 		    id == 0x15 ? "DPK_RXAGC" :
1855 		    id == 0x16 ? "KIP_PRESET" :
1856 		    id == 0x17 ? "KIP_RESTORE" :
1857 		    "DPK_TXAGC", dpk_cmd);
1858 }
1859 
/* Trigger RX DC calibration on @path by pulsing RR_DCK_LV low then high,
 * then wait 600 us for the calibration to settle.
 */
static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path)
{
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	udelay(600);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path);
}
1870 
/* Snapshot the current channel (band/channel/bandwidth) into the DPK
 * backup slot for @path's current calibration index, and log the DPK
 * context (TSSI mode, DBCC, band, channel, bandwidth).
 */
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	u8 kidx = dpk->cur_idx[path];

	dpk->bp[path][kidx].band = chan->band_type;
	dpk->bp[path][kidx].ch = chan->channel;
	dpk->bp[path][kidx].bw = chan->band_width;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
		    path, dpk->cur_idx[path], phy,
		    rtwdev->is_tssi_mode[path] ? "on" : "off",
		    rtwdev->dbcc_en ? "on" : "off",
		    dpk->bp[path][kidx].band == 0 ? "2G" :
		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
		    dpk->bp[path][kidx].ch,
		    dpk->bp[path][kidx].bw == 0 ? "20M" :
		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
}
1894 
/* Toggle per-path TSSI tracking around DPK: the tracking-enable bit is
 * written with @is_pause directly (1 while paused, 0 on resume).
 */
static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
			    enum rtw89_rf_path path, bool is_pause)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
			       B_P0_TSSI_TRK_EN, is_pause);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
		    is_pause ? "pause" : "resume");
}
1904 
/* Return the KIP block to normal operation after DPK: clear the report
 * selector, restore the system config, and (on chips newer than CAV)
 * re-assert the per-path DPD output-format bit.
 */
static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
			     enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);

	if (rtwdev->hal.cv > CHIP_CAV)
		rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8),
				       B_DPD_COM_OF, 0x1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
}
1917 
/* Run a loopback RX IQ calibration for DPK on @path.
 *
 * Sets up the RF loopback (channel config from @rf_18, RX-mode 0xd,
 * RXK PLL on), picks a TX attenuation based on the current RXBB gain
 * (@cur_rxbb), fires the LBK_RXIQK one-shot, then tears the loopback
 * back down and returns the RF to mode 0x5.
 */
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18)
{
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);

	/* Scale loopback attenuation with the current RXBB gain. */
	if (cur_rxbb >= 0x11)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
	else if (cur_rxbb <= 0xa)
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
	else
		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);

	rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
	rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);

	/* Let the RXK PLL settle before firing the one-shot. */
	udelay(100);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);

	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);

	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
}
1954 
/* Configure the RF front end for DPK on @path, with band-specific RX
 * attenuation/LNA settings (2 GHz vs. 5/6 GHz), then program the TX BB
 * filter bandwidth from the stored channel bandwidth.
 *
 * NOTE(review): the @gain parameter is not used in this body — confirm
 * whether it is kept only for call-site symmetry.
 */
static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
			    enum rtw89_rf_path path, u8 kidx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;

	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
	} else {
		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
		rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
		rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
		rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
	}

	rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
	/* TX BB filter: bandwidth enum + 1 maps to the RF field encoding. */
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
	rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
}
1979 
/* Set or clear both RX CFIR bypass bits for @path according to
 * @is_bypass (BYPASS2 first, then BYPASS, as in the original sequence).
 */
static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, bool is_bypass)
{
	u32 en = is_bypass ? 0x1 : 0x0;

	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
			       B_RXIQC_BYPASS2, en);
	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
			       B_RXIQC_BYPASS, en);
}
1995 
1996 static
_dpk_tpg_sel(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,u8 kidx)1997 void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
1998 {
1999 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2000 
2001 	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2002 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
2003 	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
2004 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2005 	else
2006 		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2007 
2008 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2009 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2010 		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2011 }
2012 
/* Select the DPD coefficient table slot for (@kidx, @gain): slot byte is
 * 0x80 plus 0x20 per calibration index and 0x10 per gain setting.
 */
static void _dpk_table_select(struct rtw89_dev *rtwdev,
			      enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	u8 val = 0x80 + kidx * 0x20 + gain * 0x10;

	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
		    gain, val);
}
2024 
/* Read back the SYNC one-shot report and decide whether sync failed.
 *
 * Report page 0 provides the correlation index/value; page 9 provides
 * the 12-bit signed DC I/Q estimates (stored as magnitudes). Returns
 * true (= sync FAILED) when DC exceeds the thresholds or correlation is
 * below DPK_SYNC_TH_CORR.
 */
static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
#define DPK_SYNC_TH_DC_I 200
#define DPK_SYNC_TH_DC_Q 200
#define DPK_SYNC_TH_CORR 170
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 corr_val, corr_idx;
	u16 dc_i, dc_q;
	u32 corr, dc;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	corr_idx = u32_get_bits(corr, B_PRT_COM_CORI);
	corr_val = u32_get_bits(corr, B_PRT_COM_CORV);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
		    path, corr_idx, corr_val);

	dpk->corr_idx[path][kidx] = corr_idx;
	dpk->corr_val[path][kidx] = corr_val;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);

	dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
	dc_i = u32_get_bits(dc, B_PRT_COM_DCI);
	dc_q = u32_get_bits(dc, B_PRT_COM_DCQ);

	/* Fields are 12-bit two's complement; keep magnitudes only. */
	dc_i = abs(sign_extend32(dc_i, 11));
	dc_q = abs(sign_extend32(dc_q, 11));

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
		    path, dc_i, dc_q);

	dpk->dc_i[path][kidx] = dc_i;
	dpk->dc_q[path][kidx] = dc_q;

	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
	    corr_val < DPK_SYNC_TH_CORR)
		return true;
	else
		return false;
}
2069 
/* Fire the SYNC one-shot; results are fetched by _dpk_sync_check().
 * @kidx is currently unused here.
 */
static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		      enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, SYNC);
}
2075 
/* Select KIP report page 0 and read back the digital gain field. */
static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
{
	u16 dgain;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);

	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);

	return dgain;
}
2088 
_dpk_dgain_mapping(struct rtw89_dev * rtwdev,u16 dgain)2089 static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
2090 {
2091 	static const u16 bnd[15] = {
2092 		0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
2093 		0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
2094 	};
2095 	s8 offset;
2096 
2097 	if (dgain >= bnd[0])
2098 		offset = 0x6;
2099 	else if (bnd[0] > dgain && dgain >= bnd[1])
2100 		offset = 0x6;
2101 	else if (bnd[1] > dgain && dgain >= bnd[2])
2102 		offset = 0x5;
2103 	else if (bnd[2] > dgain && dgain >= bnd[3])
2104 		offset = 0x4;
2105 	else if (bnd[3] > dgain && dgain >= bnd[4])
2106 		offset = 0x3;
2107 	else if (bnd[4] > dgain && dgain >= bnd[5])
2108 		offset = 0x2;
2109 	else if (bnd[5] > dgain && dgain >= bnd[6])
2110 		offset = 0x1;
2111 	else if (bnd[6] > dgain && dgain >= bnd[7])
2112 		offset = 0x0;
2113 	else if (bnd[7] > dgain && dgain >= bnd[8])
2114 		offset = 0xff;
2115 	else if (bnd[8] > dgain && dgain >= bnd[9])
2116 		offset = 0xfe;
2117 	else if (bnd[9] > dgain && dgain >= bnd[10])
2118 		offset = 0xfd;
2119 	else if (bnd[10] > dgain && dgain >= bnd[11])
2120 		offset = 0xfc;
2121 	else if (bnd[11] > dgain && dgain >= bnd[12])
2122 		offset = 0xfb;
2123 	else if (bnd[12] > dgain && dgain >= bnd[13])
2124 		offset = 0xfa;
2125 	else if (bnd[13] > dgain && dgain >= bnd[14])
2126 		offset = 0xf9;
2127 	else if (bnd[14] > dgain)
2128 		offset = 0xf8;
2129 	else
2130 		offset = 0x0;
2131 
2132 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
2133 
2134 	return offset;
2135 }
2136 
/* Select KIP report page 6, latch the DPK status, and read back the
 * gain-loss index field.
 */
static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);

	return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
}
2144 
/* Fire the GAIN_LOSS one-shot, then pre-select report page 6 and latch
 * the status so a following _dpk_gainloss_read() returns fresh data.
 */
static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 kidx)
{
	_dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
}
2153 
/* Pick the TPG mode for the calibrated bandwidth, then run the
 * KIP_PRESET one-shot for @path/@kidx.
 */
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			    enum rtw89_rf_path path, u8 kidx)
{
	_dpk_tpg_sel(rtwdev, path, kidx);
	_dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
}
2160 
/* Power up the KIP block and enable its clocks for DPK on @path. */
static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
				enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
	rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
}
2170 
2171 static
_dpk_txagc_check_8852bt(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,u8 txagc)2172 u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc)
2173 {
2174 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2175 
2176 	if (txagc >= dpk->max_dpk_txagc[path])
2177 		txagc = dpk->max_dpk_txagc[path];
2178 
2179 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc);
2180 
2181 	return txagc;
2182 }
2183 
/* Program a (ceiling-checked) TXAGC into the RF and latch it into KIP
 * via the DPK_TXAGC one-shot, with RFCTM enabled around the command.
 */
static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path, u8 txagc)
{
	u8 val;

	val = _dpk_txagc_check_8852bt(rtwdev, path, txagc);
	rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
}
2197 
/* Latch the RX AGC setting into KIP via the DPK_RXAGC one-shot, with
 * RFCTM enabled around the command.
 */
static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			       enum rtw89_rf_path path)
{
	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
	_dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
	rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
}
2206 
/* Step the TXAGC down by @gain_offset (negative offsets step up),
 * clamped to [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER], and apply it via KIP.
 * Returns the TXAGC value actually applied.
 *
 * NOTE(review): the incoming @txagc argument is immediately overwritten
 * by the current RF TXAGC readback, so the parameter value is unused —
 * confirm whether callers rely on the readback semantics.
 */
static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			  enum rtw89_rf_path path, u8 txagc, s8 gain_offset)
{
	txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);

	if ((txagc - gain_offset) < DPK_TXAGC_LOWER)
		txagc = DPK_TXAGC_LOWER;
	else if ((txagc - gain_offset) > DPK_TXAGC_UPPER)
		txagc = DPK_TXAGC_UPPER;
	else
		txagc = txagc - gain_offset;

	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
		    gain_offset, txagc);
	return txagc;
}
2225 
/* Inspect the PA sample buffer via KIP report page 6.
 *
 * With @is_check set, compare the power of sample 0 against sample 0x1f
 * (12-bit signed I/Q, magnitudes) and return true when
 * |s0|^2 >= 1.6 * |s31|^2. With @is_check clear, all 32 raw samples are
 * only dumped to the debug log — val1/val2 then stay zero, so the final
 * comparison is 0 >= 0 and the function returns true.
 */
static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			  u8 is_check)
{
	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
	u8 i;

	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);

	if (is_check) {
		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val1_i = abs(sign_extend32(val1_i, 11));
		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val1_q = abs(sign_extend32(val1_q, 11));

		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
		val2_i = abs(sign_extend32(val2_i, 11));
		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
		val2_q = abs(sign_extend32(val2_q, 11));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
			    phy_div(val1_i * val1_i + val1_q * val1_q,
				    val2_i * val2_i + val2_q * val2_q));
	} else {
		for (i = 0; i < 32; i++) {
			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
		}
	}

	if (val1_i * val1_i + val1_q * val1_q >=
	    (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
		return true;

	return false;
}
2267 
_dpk_agc(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,u8 kidx,u8 init_txagc,bool loss_only,enum rtw89_chanctx_idx chanctx_idx)2268 static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2269 		   enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
2270 		   bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
2271 {
2272 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2273 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2274 	u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
2275 	u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
2276 	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
2277 	int limit = 200;
2278 	s8 offset = 0;
2279 	u16 dgain = 0;
2280 	u32 rf_18;
2281 
2282 	tmp_txagc = init_txagc;
2283 
2284 	tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
2285 	rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
2286 
2287 	do {
2288 		switch (step) {
2289 		case DPK_AGC_STEP_SYNC_DGAIN:
2290 			_dpk_sync(rtwdev, phy, path, kidx);
2291 			if (agc_cnt == 0) {
2292 				if (chan->band_width < 2)
2293 					_dpk_bypass_rxcfir(rtwdev, path, true);
2294 				else
2295 					_dpk_lbk_rxiqk(rtwdev, phy, path,
2296 						       tmp_rxbb, rf_18);
2297 			}
2298 
2299 			if (_dpk_sync_check(rtwdev, path, kidx) == true) {
2300 				tmp_txagc = 0xff;
2301 				goout = 1;
2302 				break;
2303 			}
2304 
2305 			dgain = _dpk_dgain_read(rtwdev);
2306 			offset = _dpk_dgain_mapping(rtwdev, dgain);
2307 
2308 			if (loss_only == 1 || limited_rxbb == 1 || offset == 0)
2309 				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2310 			else
2311 				step = DPK_AGC_STEP_GAIN_ADJ;
2312 			break;
2313 		case DPK_AGC_STEP_GAIN_ADJ:
2314 			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
2315 
2316 			if (tmp_rxbb + offset > 0x1f) {
2317 				tmp_rxbb = 0x1f;
2318 				limited_rxbb = 1;
2319 			} else if (tmp_rxbb + offset < 0) {
2320 				tmp_rxbb = 0;
2321 				limited_rxbb = 1;
2322 			} else {
2323 				tmp_rxbb = tmp_rxbb + offset;
2324 			}
2325 
2326 			rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb);
2327 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
2328 				    "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
2329 
2330 			if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
2331 				_dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18);
2332 			if (dgain > 1922 || dgain < 342)
2333 				step = DPK_AGC_STEP_SYNC_DGAIN;
2334 			else
2335 				step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2336 
2337 			agc_cnt++;
2338 			break;
2339 		case DPK_AGC_STEP_GAIN_LOSS_IDX:
2340 			_dpk_gainloss(rtwdev, phy, path, kidx);
2341 
2342 			tmp_gl_idx = _dpk_gainloss_read(rtwdev);
2343 
2344 			if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) ||
2345 			    tmp_gl_idx >= 7)
2346 				step = DPK_AGC_STEP_GL_GT_CRITERION;
2347 			else if (tmp_gl_idx == 0)
2348 				step = DPK_AGC_STEP_GL_LT_CRITERION;
2349 			else
2350 				step = DPK_AGC_STEP_SET_TX_GAIN;
2351 
2352 			gl_cnt++;
2353 			break;
2354 		case DPK_AGC_STEP_GL_GT_CRITERION:
2355 			if (tmp_txagc == 0x2e ||
2356 			    tmp_txagc == dpk->max_dpk_txagc[path]) {
2357 				goout = 1;
2358 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
2359 					    "[DPK] Txagc@lower bound!!\n");
2360 			} else {
2361 				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
2362 							    tmp_txagc, 0x3);
2363 			}
2364 			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2365 			agc_cnt++;
2366 			break;
2367 
2368 		case DPK_AGC_STEP_GL_LT_CRITERION:
2369 			if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) {
2370 				goout = 1;
2371 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
2372 					    "[DPK] Txagc@upper bound!!\n");
2373 			} else {
2374 				tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
2375 							    tmp_txagc, 0xfe);
2376 			}
2377 			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2378 			agc_cnt++;
2379 			break;
2380 
2381 		case DPK_AGC_STEP_SET_TX_GAIN:
2382 			tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc,
2383 						    tmp_gl_idx);
2384 			goout = 1;
2385 			agc_cnt++;
2386 			break;
2387 
2388 		default:
2389 			goout = 1;
2390 			break;
2391 		}
2392 	} while (!goout && agc_cnt < 6 && limit-- > 0);
2393 
2394 	if (gl_cnt >= 6)
2395 		_dpk_pas_read(rtwdev, path, false);
2396 
2397 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2398 		    "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb);
2399 
2400 	return tmp_txagc;
2401 }
2402 
static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path path, u8 order)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 pn_phase, sync_man, bp_order;

	/* Translate the requested MDPD order into the PN phase count, the
	 * manual-sync flag and the order value cached in the backup info.
	 */
	switch (order) {
	case 0: /* (5,3,1) */
		pn_phase = 0x3;
		sync_man = 0x1;
		bp_order = 0x3;
		break;
	case 1: /* (5,3,0) */
		pn_phase = 0x0;
		sync_man = 0x0;
		bp_order = 0x1;
		break;
	case 2: /* (5,0,0) */
		pn_phase = 0x0;
		sync_man = 0x0;
		bp_order = 0x0;
		break;
	default:
		/* Unknown order: log it but touch no registers. */
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DPK] Wrong MDPD order!!(0x%x)\n", order);
		goto log;
	}

	rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
	rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, pn_phase);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, sync_man);
	dpk->dpk_order[path] = bp_order;

log:
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
		    order == 0x0 ? "(5,3,1)" :
		    order == 0x1 ? "(5,3,0)" : "(5,0,0)");

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Set MDPD order to 0x%x for IDL\n", order);
}
2440 
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 enum rtw89_rf_path path, u8 kidx, u8 gain)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	bool narrow_5g;

	/* Sub-80MHz 5G channels use MDPD order 0x2 (5,0,0); everything
	 * else uses order 0x0 (5,3,1).
	 */
	narrow_5g = dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
		    dpk->bp[path][kidx].band == RTW89_BAND_5G;

	_dpk_set_mdpd_para(rtwdev, path, narrow_5g ? 0x2 : 0x0);

	_dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
}
2454 
static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 path_ofst = path << 8;
	u32 coef_addr = R_DPD_CH0A + path_ofst + (kidx << 2);
	u8 gs = dpk->dpk_gs[phy];
	u16 pwsf = 0x78;

	/* Select which of the two backup slots the coefficients go to. */
	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + path_ofst, BIT(8), kidx);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n",
		    txagc, pwsf, gs);

	/* Cache and program the final TXAGC for this (path, kidx) slot. */
	dpk->bp[path][kidx].txagc_dpk = txagc;
	rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + path_ofst,
			       0x3F << ((gain << 3) + (kidx << 4)), txagc);

	dpk->bp[path][kidx].pwsf = pwsf;
	rtw89_phy_write32_mask(rtwdev, R_DPD_BND + path_ofst + (kidx << 2),
			       0x1FF << (gain << 4), pwsf);

	/* Pulse the MDPD coefficient load strobe. */
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + path_ofst, B_LOAD_COEF_MDPD, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + path_ofst, B_LOAD_COEF_MDPD, 0x0);

	dpk->bp[path][kidx].gs = gs;
	if (dpk->dpk_gs[phy] == 0x7f)
		rtw89_phy_write32_mask(rtwdev, coef_addr, MASKDWORD, 0x007f7f7f);
	else
		rtw89_phy_write32_mask(rtwdev, coef_addr, MASKDWORD, 0x005b5b5b);

	rtw89_phy_write32_mask(rtwdev, coef_addr, B_DPD_ORDER_V1,
			       dpk->dpk_order[path]);

	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + path_ofst, MASKDWORD, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
}
2495 
_dpk_reload_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,enum rtw89_chanctx_idx chanctx_idx)2496 static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2497 			      enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
2498 {
2499 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2500 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2501 	u8 idx, cur_band, cur_ch;
2502 	bool is_reload = false;
2503 
2504 	cur_band = chan->band_type;
2505 	cur_ch = chan->channel;
2506 
2507 	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2508 		if (cur_band != dpk->bp[path][idx].band ||
2509 		    cur_ch != dpk->bp[path][idx].ch)
2510 			continue;
2511 
2512 		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2513 				       B_COEF_SEL_MDPD, idx);
2514 		dpk->cur_idx[path] = idx;
2515 		is_reload = true;
2516 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2517 			    "[DPK] reload S%d[%d] success\n", path, idx);
2518 	}
2519 
2520 	return is_reload;
2521 }
2522 
2523 static
_rf_direct_cntrl(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool is_bybb)2524 void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2525 {
2526 	if (is_bybb)
2527 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
2528 	else
2529 		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
2530 }
2531 
2532 static
_drf_direct_cntrl(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,bool is_bybb)2533 void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
2534 {
2535 	if (is_bybb)
2536 		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
2537 	else
2538 		rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
2539 }
2540 
_dpk_main(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,u8 gain,enum rtw89_chanctx_idx chanctx_idx)2541 static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2542 		      enum rtw89_rf_path path, u8 gain,
2543 		      enum rtw89_chanctx_idx chanctx_idx)
2544 {
2545 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2546 	u8 txagc = 0x38, kidx = dpk->cur_idx[path];
2547 	bool is_fail = false;
2548 
2549 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2550 		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
2551 
2552 	_rf_direct_cntrl(rtwdev, path, false);
2553 	_drf_direct_cntrl(rtwdev, path, false);
2554 
2555 	_dpk_kip_pwr_clk_on(rtwdev, path);
2556 	_dpk_kip_set_txagc(rtwdev, phy, path, txagc);
2557 	_dpk_rf_setting(rtwdev, gain, path, kidx);
2558 	_dpk_rx_dck(rtwdev, phy, path);
2559 	_dpk_kip_preset(rtwdev, phy, path, kidx);
2560 	_dpk_kip_set_rxagc(rtwdev, phy, path);
2561 	_dpk_table_select(rtwdev, path, kidx, gain);
2562 
2563 	txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
2564 
2565 	_rfk_get_thermal(rtwdev, kidx, path);
2566 
2567 	if (txagc == 0xff) {
2568 		is_fail = true;
2569 		goto _error;
2570 	}
2571 
2572 	_dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
2573 
2574 	rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX);
2575 	_dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
2576 
2577 _error:
2578 	if (!is_fail)
2579 		dpk->bp[path][kidx].path_ok = 1;
2580 	else
2581 		dpk->bp[path][kidx].path_ok = 0;
2582 
2583 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2584 		    is_fail ? "Check" : "Success");
2585 
2586 	_dpk_onoff(rtwdev, path, is_fail);
2587 
2588 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
2589 		    is_fail ? "Check" : "Success");
2590 
2591 	return is_fail;
2592 }
2593 
/* Run DPK across all paths of @phy, reusing a backed-up calibration when
 * the current channel matches one, and backing up / restoring BB, KIP and
 * RF registers around the calibration sequence.
 */
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
			    enum rtw89_phy_idx phy, u8 kpath,
			    enum rtw89_chanctx_idx chanctx_idx)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u32 backup_kip_val[BACKUP_KIP_REGS_NR];
	u32 backup_bb_val[BACKUP_BB_REGS_NR];
	u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
	bool reloaded[2] = {false};
	u8 path;

	/* Pick the backup slot per path: when no reload is possible and the
	 * first slot was already used, flip to the spare slot.
	 */
	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
		if (!reloaded[path] && dpk->bp[path][0].ch != 0)
			dpk->cur_idx[path] = !dpk->cur_idx[path];
		else
			_dpk_onoff(rtwdev, path, false);
	}

	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
	_rfk_backup_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
		_dpk_information(rtwdev, phy, path, chanctx_idx);
		/* TSSI tracking must be paused while DPK owns the path. */
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, true);
	}

	/* NOTE(review): 'path' here is the loop exit value
	 * (DPK_RF_PATH_MAX_8852BT), not a valid path index; presumably
	 * these helpers key off 'kpath' instead — confirm the 'path'
	 * argument is ignored by them.
	 */
	_rfk_bb_afe_setting(rtwdev, phy, path, kpath);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
		_dpk_main(rtwdev, phy, path, 1, chanctx_idx);

	_rfk_bb_afe_restore(rtwdev, phy, path, kpath);

	_dpk_kip_restore(rtwdev, path);
	_rfk_reload_bb_reg(rtwdev, backup_bb_val);
	_rfk_reload_kip_reg(rtwdev, backup_kip_val);

	for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
		_rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
		if (rtwdev->is_tssi_mode[path])
			_dpk_tssi_pause(rtwdev, path, false);
	}
}
2640 
_dpk_bypass_check(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_chanctx_idx chanctx_idx)2641 static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2642 			      enum rtw89_chanctx_idx chanctx_idx)
2643 {
2644 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
2645 	struct rtw89_fem_info *fem = &rtwdev->fem;
2646 
2647 	if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
2648 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2649 			    "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2650 		return true;
2651 	} else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
2652 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2653 			    "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2654 		return true;
2655 	} else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
2656 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2657 			    "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2658 		return true;
2659 	}
2660 
2661 	return false;
2662 }
2663 
_dpk_force_bypass(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2664 static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2665 {
2666 	u8 path, kpath;
2667 
2668 	kpath = _kpath(rtwdev, phy);
2669 
2670 	for (path = 0; path < RTW8852BT_SS; path++) {
2671 		if (kpath & BIT(path))
2672 			_dpk_onoff(rtwdev, path, true);
2673 	}
2674 }
2675 
/* Periodic DPK tracking: adjust each path's power scaling factor (pwsf)
 * to follow thermal drift relative to the temperature recorded at
 * calibration time, then program the new pwsf unless tracking is disabled.
 */
static void _dpk_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
	s8 delta_ther[2] = {};
	u8 trk_idx, txagc_rf;
	u8 path, kidx;
	u16 pwsf[2];
	u8 cur_ther;
	u32 tmp;

	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
		kidx = dpk->cur_idx[path];

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
			    path, kidx, dpk->bp[path][kidx].ch);

		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "[DPK_TRK] thermal now = %d\n", cur_ther);

		/* Thermal delta vs. calibration time; 0 when no valid
		 * calibration or no thermal reading.
		 */
		if (dpk->bp[path][kidx].ch && cur_ther)
			delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;

		/* Band-dependent scaling of the thermal delta. */
		if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
			delta_ther[path] = delta_ther[path] * 3 / 2;
		else
			delta_ther[path] = delta_ther[path] * 5 / 2;

		txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						 B_TXAGC_RF);

		if (rtwdev->is_tssi_mode[path]) {
			trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
				    txagc_rf, trk_idx);

			txagc_bb =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE2);
			txagc_bb_tp =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
						      B_TXAGC_TP);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
				    txagc_bb_tp, txagc_bb);

			txagc_ofst =
				rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
						      MASKBYTE3);

			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
				    txagc_ofst, delta_ther[path]);
			/* When HW applies the txagc offset itself, do not
			 * apply it again in software.
			 */
			tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
						    B_DPD_COM_OF);
			if (tmp == 0x1) {
				txagc_ofst = 0;
				rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
					    "[DPK_TRK] HW txagc offset mode\n");
			}

			if (txagc_rf && cur_ther)
				ini_diff = txagc_ofst + (delta_ther[path]);

			tmp = rtw89_phy_read32_mask(rtwdev,
						    R_P0_TXDPD + (path << 13),
						    B_P0_TXDPD);
			if (tmp == 0x0) {
				pwsf[0] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf +
					  txagc_bb_tp - txagc_bb + ini_diff;
			} else {
				pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
				pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
			}
		} else {
			/* Non-TSSI mode: thermal delta only, 9-bit field. */
			pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
			pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
		}

		/* Only write back when tracking is enabled and TX gain valid. */
		tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
		if (!tmp && txagc_rf) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
				    "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
				    pwsf[0], pwsf[1]);

			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_0, pwsf[0]);
			rtw89_phy_write32_mask(rtwdev,
					       R_DPD_BND + (path << 8) + (kidx << 2),
					       B_DPD_BND_1, pwsf[1]);
		}
	}
}
2778 
_set_dpd_backoff(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2779 static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2780 {
2781 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2782 	u8 tx_scale, ofdm_bkof, path, kpath;
2783 
2784 	kpath = _kpath(rtwdev, phy);
2785 
2786 	ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
2787 	tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
2788 
2789 	if (ofdm_bkof + tx_scale >= 44) {
2790 		/* move dpd backoff to bb, and set dpd backoff to 0 */
2791 		dpk->dpk_gs[phy] = 0x7f;
2792 		for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
2793 			if (!(kpath & BIT(path)))
2794 				continue;
2795 
2796 			rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
2797 					       B_DPD_CFG, 0x7f7f7f);
2798 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
2799 				    "[RFK] Set S%d DPD backoff to 0dB\n", path);
2800 		}
2801 	} else {
2802 		dpk->dpk_gs[phy] = 0x5b;
2803 	}
2804 }
2805 
_tssi_dpk_off(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy)2806 static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2807 {
2808 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0);
2809 	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0);
2810 }
2811 
_tssi_rf_setting(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2812 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2813 			     enum rtw89_rf_path path, const struct rtw89_chan *chan)
2814 {
2815 	enum rtw89_band band = chan->band_type;
2816 
2817 	if (band == RTW89_BAND_2G)
2818 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
2819 	else
2820 		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
2821 }
2822 
_tssi_set_sys(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)2823 static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2824 			  enum rtw89_rf_path path, const struct rtw89_chan *chan)
2825 {
2826 	enum rtw89_band band = chan->band_type;
2827 
2828 	rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
2829 
2830 	if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
2831 		rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1);
2832 	else
2833 		rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0);
2834 
2835 	if (path == RF_PATH_A)
2836 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2837 					 &rtw8852bt_tssi_sys_a_defs_2g_tbl,
2838 					 &rtw8852bt_tssi_sys_a_defs_5g_tbl);
2839 	else
2840 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2841 					 &rtw8852bt_tssi_sys_b_defs_2g_tbl,
2842 					 &rtw8852bt_tssi_sys_b_defs_5g_tbl);
2843 }
2844 
_tssi_ini_txpwr_ctrl_bb(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2845 static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
2846 				    enum rtw89_phy_idx phy,
2847 				    enum rtw89_rf_path path)
2848 {
2849 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2850 				 &rtw8852bt_tssi_init_txpwr_defs_a_tbl,
2851 				 &rtw8852bt_tssi_init_txpwr_defs_b_tbl);
2852 }
2853 
_tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2854 static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2855 					  enum rtw89_phy_idx phy,
2856 					  enum rtw89_rf_path path)
2857 {
2858 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2859 				 &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl,
2860 				 &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl);
2861 }
2862 
_tssi_set_dck(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)2863 static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2864 			  enum rtw89_rf_path path)
2865 {
2866 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2867 				 &rtw8852bt_tssi_dck_defs_a_tbl,
2868 				 &rtw8852bt_tssi_dck_defs_b_tbl);
2869 }
2870 
/* Program the per-path thermal-meter compensation table (64 signed byte
 * entries, packed four per 32-bit register) from the firmware TX-power
 * tracking deltas for the current subband. When no valid thermal reading
 * exists (0xff), a neutral midpoint (32) and an all-zero table are used.
 */
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
/* Pack four consecutive s8 entries starting at idx into one LE 32-bit word. */
#define RTW8852BT_TSSI_GET_VAL(ptr, idx)			\
({							\
	s8 *__ptr = (ptr);				\
	u8 __idx = (idx), __i, __v;			\
	u32 __val = 0;					\
	for (__i = 0; __i < 4; __i++) {			\
		__v = (__ptr[__idx + __i]);		\
		__val |= (__v << (8 * __i));		\
	}						\
	__val;						\
})
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 subband = chan->subband_type;
	const s8 *thm_up_a = NULL;
	const s8 *thm_down_a = NULL;
	const s8 *thm_up_b = NULL;
	const s8 *thm_down_b = NULL;
	u8 thermal = 0xff;
	s8 thm_ofst[64] = {0};
	u32 tmp = 0;
	u8 i, j;

	/* Select the firmware delta-swing curves for this subband. */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	}

	if (path == RF_PATH_A) {
		thermal = tssi_info->thermal[RF_PATH_A];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			/* No thermal reference: neutral base, zeroed table. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    R_P0_TSSI_BASE + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
					       thermal);

			/* Lower half: negated "down" deltas, clamped to the
			 * last table entry past DELTA_SWINGIDX_SIZE.
			 */
			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_a[i++] :
					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];

			/* Upper half (filled downward): "up" deltas from 1. */
			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_a[i++] :
					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);

				/* NOTE(review): 0x5c00 is presumably the
				 * literal value of R_P0_TSSI_BASE — confirm.
				 */
				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x5c00 + i, tmp);
			}
		}
		/* Latch the new table with an RFCTM ready pulse. */
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);

	} else {
		/* Path B: same sequence with the P1/THOF register set. */
		thermal = tssi_info->thermal[RF_PATH_B];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);

		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);

		if (thermal == 0xff) {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);

			for (i = 0; i < 64; i += 4) {
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, 0x0);
			}

		} else {
			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
					       thermal);
			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
					       thermal);

			i = 0;
			for (j = 0; j < 32; j++)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      -thm_down_b[i++] :
					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];

			i = 1;
			for (j = 63; j >= 32; j--)
				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
					      thm_up_b[i++] :
					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];

			for (i = 0; i < 64; i += 4) {
				tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);

				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
					    "[TSSI] write 0x%x val=0x%08x\n",
					    0x7c00 + i, tmp);
			}
		}
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
	}
#undef RTW8852BT_TSSI_GET_VAL
}
3030 
_tssi_set_dac_gain_tbl(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3031 static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3032 				   enum rtw89_rf_path path)
3033 {
3034 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3035 				 &rtw8852bt_tssi_dac_gain_defs_a_tbl,
3036 				 &rtw8852bt_tssi_dac_gain_defs_b_tbl);
3037 }
3038 
_tssi_slope_cal_org(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3039 static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3040 				enum rtw89_rf_path path, const struct rtw89_chan *chan)
3041 {
3042 	enum rtw89_band band = chan->band_type;
3043 
3044 	if (path == RF_PATH_A)
3045 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3046 					 &rtw8852bt_tssi_slope_a_defs_2g_tbl,
3047 					 &rtw8852bt_tssi_slope_a_defs_5g_tbl);
3048 	else
3049 		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3050 					 &rtw8852bt_tssi_slope_b_defs_2g_tbl,
3051 					 &rtw8852bt_tssi_slope_b_defs_5g_tbl);
3052 }
3053 
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				    enum rtw89_rf_path path, bool all,
				    const struct rtw89_chan *chan)
{
	const struct rtw89_rfk_tbl *tbl = NULL;
	u8 ch = chan->channel;

	/* Path A keys the 2G table off the band type; path B (and the 5G
	 * ranges of both paths) key off the channel number.
	 */
	if (path == RF_PATH_A && chan->band_type == RTW89_BAND_2G) {
		tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl;
	} else {
		switch (ch) {
		case 1 ... 14:
			if (path != RF_PATH_A)
				tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl;
			break;
		case 36 ... 64:
			tbl = path == RF_PATH_A ?
			      &rtw8852bt_tssi_align_a_5g1_all_defs_tbl :
			      &rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
			break;
		case 100 ... 144:
			tbl = path == RF_PATH_A ?
			      &rtw8852bt_tssi_align_a_5g2_all_defs_tbl :
			      &rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
			break;
		case 149 ... 177:
			tbl = path == RF_PATH_A ?
			      &rtw8852bt_tssi_align_a_5g3_all_defs_tbl :
			      &rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
			break;
		}
	}

	if (tbl)
		rtw89_rfk_parser(rtwdev, tbl);
}
3085 
_tssi_set_tssi_slope(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3086 static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3087 				 enum rtw89_rf_path path)
3088 {
3089 	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3090 				 &rtw8852bt_tssi_slope_defs_a_tbl,
3091 				 &rtw8852bt_tssi_slope_defs_b_tbl);
3092 }
3093 
_tssi_set_tssi_track(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path)3094 static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3095 				 enum rtw89_rf_path path)
3096 {
3097 	if (path == RF_PATH_A)
3098 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
3099 	else
3100 		rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
3101 }
3102 
/* Set the TX AGC offset moving-average mixing value for @path. */
static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_idx phy,
					  enum rtw89_rf_path path)
{
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s   path=%d\n", __func__,
		    path);

	if (path == RF_PATH_A)
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
				       B_P0_TSSI_MV_MIX, 0x010);
	else
		/* NOTE(review): path B writes B_P1_RFCTM_DEL rather than a
		 * B_P1_TSSI_MV_MIX counterpart — presumably the same bitfield
		 * under a different name in reg.h; confirm.
		 */
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
				       B_P1_RFCTM_DEL, 0x010);
}
3117 
/* Enable TSSI power tracking on both paths: clear bypass/clear bits,
 * pulse the TSSI and offset enables (0 then 1), and flag TSSI mode on.
 */
static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	u8 i;

	for (i = 0; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_set_tssi_track(rtwdev, phy, i);
		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);

		if (i == RF_PATH_A) {
			/* Un-clear the moving average, then re-arm TSSI. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
					       B_P0_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
					       B_P0_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_RFC, 0x3);

			/* Program the offset then pulse offset-enable. */
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
					       B_P0_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_A] = true;
		} else {
			/* Same sequence on the path-B register set. */
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
					       B_P1_TSSI_MV_CLR, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
					       B_P1_TSSI_EN, 0x1);
			rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
				       RR_TXGA_V1_TRK_EN, 0x1);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_RFC, 0x3);

			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT, 0xc0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x0);
			rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
					       B_P1_TSSI_OFT_EN, 0x1);

			rtwdev->is_tssi_mode[RF_PATH_B] = true;
		}
	}
}
3169 
/* Disable TSSI tracking on both paths and clear the moving-average state;
 * also clears the driver-side TSSI-mode flags.
 */
static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);

	rtwdev->is_tssi_mode[RF_PATH_A] = false;
	rtwdev->is_tssi_mode[RF_PATH_B] = false;
}
3182 
_tssi_get_cck_group(struct rtw89_dev * rtwdev,u8 ch)3183 static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3184 {
3185 	switch (ch) {
3186 	case 1 ... 2:
3187 		return 0;
3188 	case 3 ... 5:
3189 		return 1;
3190 	case 6 ... 8:
3191 		return 2;
3192 	case 9 ... 11:
3193 		return 3;
3194 	case 12 ... 13:
3195 		return 4;
3196 	case 14:
3197 		return 5;
3198 	}
3199 
3200 	return 0;
3201 }
3202 
/* A group index with TSSI_EXTRA_GROUP_BIT set marks a channel that sits
 * between two calibrated groups: callers average the DE values of groups
 * IDX1 and IDX2 (= IDX1 + 1) instead of using a single table entry.
 */
#define TSSI_EXTRA_GROUP_BIT (BIT(31))
#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3208 
/* Map a channel number to its OFDM/MCS TSSI DE group. Channels lying
 * between two calibrated groups return TSSI_EXTRA_GROUP(idx), telling the
 * caller to average the DE of groups idx and idx + 1. Unknown channels
 * fall back to group 0.
 */
static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
3276 
_tssi_get_trim_group(struct rtw89_dev * rtwdev,u8 ch)3277 static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3278 {
3279 	switch (ch) {
3280 	case 1 ... 8:
3281 		return 0;
3282 	case 9 ... 14:
3283 		return 1;
3284 	case 36 ... 48:
3285 		return 2;
3286 	case 52 ... 64:
3287 		return 3;
3288 	case 100 ... 112:
3289 		return 4;
3290 	case 116 ... 128:
3291 		return 5;
3292 	case 132 ... 144:
3293 		return 6;
3294 	case 149 ... 177:
3295 		return 7;
3296 	}
3297 
3298 	return 0;
3299 }
3300 
_tssi_get_ofdm_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3301 static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3302 			    enum rtw89_rf_path path, const struct rtw89_chan *chan)
3303 {
3304 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3305 	u8 ch = chan->channel;
3306 	u32 gidx, gidx_1st, gidx_2nd;
3307 	s8 de_1st;
3308 	s8 de_2nd;
3309 	s8 val;
3310 
3311 	gidx = _tssi_get_ofdm_group(rtwdev, ch);
3312 
3313 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3314 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
3315 
3316 	if (IS_TSSI_EXTRA_GROUP(gidx)) {
3317 		gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3318 		gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3319 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3320 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3321 		val = (de_1st + de_2nd) / 2;
3322 
3323 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3324 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3325 			    path, val, de_1st, de_2nd);
3326 	} else {
3327 		val = tssi_info->tssi_mcs[path][gidx];
3328 
3329 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3330 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3331 	}
3332 
3333 	return val;
3334 }
3335 
_tssi_get_ofdm_trim_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3336 static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3337 				 enum rtw89_rf_path path, const struct rtw89_chan *chan)
3338 {
3339 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3340 	u8 ch = chan->channel;
3341 	u32 tgidx, tgidx_1st, tgidx_2nd;
3342 	s8 tde_1st;
3343 	s8 tde_2nd;
3344 	s8 val;
3345 
3346 	tgidx = _tssi_get_trim_group(rtwdev, ch);
3347 
3348 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3349 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3350 		    path, tgidx);
3351 
3352 	if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3353 		tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3354 		tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3355 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3356 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3357 		val = (tde_1st + tde_2nd) / 2;
3358 
3359 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3360 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3361 			    path, val, tde_1st, tde_2nd);
3362 	} else {
3363 		val = tssi_info->tssi_trim[path][tgidx];
3364 
3365 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3366 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3367 			    path, val);
3368 	}
3369 
3370 	return val;
3371 }
3372 
/* Program the efuse-derived TSSI DE (plus trim) values into the CCK and
 * MCS DE registers of both paths for the current channel.
 */
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				  const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	u8 gidx;
	s8 ofdm_de;
	s8 trim_de;
	s32 val;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		/* CCK DE: efuse value for the CCK group plus the trim offset */
		gidx = _tssi_get_cck_group(rtwdev, ch);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = tssi_info->tssi_cck[i][gidx] + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_cck_long[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
						  _TSSI_DE_MASK));

		/* OFDM/MCS DE: same value is written for every bandwidth */
		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
		val = ofdm_de + trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
			    i, ofdm_de, trim_de);

		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i],
				       _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
			    _tssi_de_mcs_20m[i],
			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
						  _TSSI_DE_MASK));
	}
}
3428 
_tssi_alimentk_dump_result(struct rtw89_dev * rtwdev,enum rtw89_rf_path path)3429 static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
3430 {
3431 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3432 		    "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
3433 		    "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
3434 		    R_TSSI_PA_K1 + (path << 13),
3435 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)),
3436 		    R_TSSI_PA_K2 + (path << 13),
3437 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)),
3438 		    R_P0_TSSI_ALIM1 + (path << 13),
3439 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)),
3440 		    R_P0_TSSI_ALIM3 + (path << 13),
3441 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)),
3442 		    R_TSSI_PA_K5 + (path << 13),
3443 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)),
3444 		    R_P0_TSSI_ALIM2 + (path << 13),
3445 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)),
3446 		    R_P0_TSSI_ALIM4 + (path << 13),
3447 		    rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)),
3448 		    R_TSSI_PA_K8 + (path << 13),
3449 		    rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13)));
3450 }
3451 
_tssi_alimentk_done(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_rf_path path,const struct rtw89_chan * chan)3452 static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
3453 				enum rtw89_phy_idx phy, enum rtw89_rf_path path,
3454 				const struct rtw89_chan *chan)
3455 {
3456 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3457 	u8 channel = chan->channel;
3458 	u8 band;
3459 
3460 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3461 		    "======>%s   phy=%d   path=%d\n", __func__, phy, path);
3462 
3463 	if (channel >= 1 && channel <= 14)
3464 		band = TSSI_ALIMK_2G;
3465 	else if (channel >= 36 && channel <= 64)
3466 		band = TSSI_ALIMK_5GL;
3467 	else if (channel >= 100 && channel <= 144)
3468 		band = TSSI_ALIMK_5GM;
3469 	else if (channel >= 149 && channel <= 177)
3470 		band = TSSI_ALIMK_5GH;
3471 	else
3472 		band = TSSI_ALIMK_2G;
3473 
3474 	if (tssi_info->alignment_done[path][band]) {
3475 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
3476 				       tssi_info->alignment_value[path][band][0]);
3477 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
3478 				       tssi_info->alignment_value[path][band][1]);
3479 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
3480 				       tssi_info->alignment_value[path][band][2]);
3481 		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
3482 				       tssi_info->alignment_value[path][band][3]);
3483 	}
3484 
3485 	_tssi_alimentk_dump_result(rtwdev, path);
3486 }
3487 
/* Start (@enable) or stop PMAC packet TX on @path for TSSI measurement.
 * When starting, the PLCP, TX path, RX path and power are configured
 * first; stopping only disables the PMAC packet generator.
 */
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
			u8 enable, const struct rtw89_chan *chan)
{
	enum rtw89_rf_path_bit rx_path;

	switch (path) {
	case RF_PATH_A:
		rx_path = RF_A;
		break;
	case RF_PATH_B:
		rx_path = RF_B;
		break;
	case RF_PATH_AB:
		rx_path = RF_AB;
		break;
	default:
		rx_path = RF_ABCD; /* don't change path, but still set others */
		break;
	}

	if (enable) {
		rtw8852bx_bb_set_plcp_tx(rtwdev);
		rtw8852bx_bb_cfg_tx_path(rtwdev, path);
		rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
		rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
	}

	rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
3512 
/* Save the raw 32-bit contents of @reg_num BB registers into @reg_backup. */
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)
{
	u32 val;
	u32 n;

	for (n = 0; n < reg_num; n++) {
		val = rtw89_phy_read32_mask(rtwdev, reg[n], MASKDWORD);
		reg_backup[n] = val;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Backup BB 0x%x = 0x%x\n", reg[n], val);
	}
}
3527 
/* Write the values saved by _tssi_backup_bb_registers() back to the BB. */
static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_idx phy, const u32 reg[],
				      u32 reg_backup[], u32 reg_num)

{
	u32 n;

	for (n = 0; n < reg_num; n++) {
		rtw89_phy_write32_mask(rtwdev, reg[n], MASKDWORD, reg_backup[n]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI] Reload BB 0x%x = 0x%x\n", reg[n],
			    reg_backup[n]);
	}
}
3543 
/* Convert a channel number to its index in the per-channel TSSI backup
 * tables: 2G channels occupy 0..13, then the 5G sub-bands follow in
 * 2-channel steps. Unknown channels map to index 0.
 */
static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
	if (channel >= 1 && channel <= 14)
		return channel - 1;
	if (channel >= 36 && channel <= 64)
		return (channel - 36) / 2 + 14;
	if (channel >= 100 && channel <= 144)
		return ((channel - 100) / 2) + 15 + 14;
	if (channel >= 149 && channel <= 177)
		return ((channel - 149) / 2) + 38 + 14;

	return 0;
}
3561 
/* Transmit PMAC packets at each calibration power level and collect the
 * TSSI codeword reports into @tssi_cw_rpt. Returns false if the report
 * ready bit never asserts within the retry budget.
 */
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
				enum rtw89_rf_path path, const s16 *power,
				u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
	u32 tx_counter, tx_counter_tmp;
	const int retry = 100;
	u32 tmp;
	int j, k;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		/* toggle the trigger enable bit to start a fresh report */
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
		rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);

		tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);

		tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] 0x%x = 0x%08x   path=%d\n",
			    _tssi_trigger[path], tmp, path);

		/* first round configures the TX path; later rounds keep the
		 * current path setup (RF_PATH_ABCD leaves it unchanged)
		 */
		if (j == 0)
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true,
				    chan);
		else
			_tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
				    chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] First HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);

		/* poll for the codeword-report ready bit */
		for (k = 0; k < retry; k++) {
			tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
						    B_TSSI_CWRPT_RDY);
			if (tmp)
				break;

			udelay(30);

			tx_counter_tmp =
				rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
			tx_counter_tmp -= tx_counter;

			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
				    k, tx_counter_tmp, path);
		}

		if (k >= retry) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
				    k, path);

			/* stop the PMAC TX before bailing out */
			_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
			return false;
		}

		tssi_cw_rpt[j] =
			rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
					      B_TSSI_CWRPT);

		_tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);

		tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
		tx_counter_tmp -= tx_counter;

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
			    tx_counter_tmp, path);
	}

	return true;
}
3638 
/* Run the TSSI alignment calibration (PA K) for one path: transmit at
 * fixed power levels, read back TSSI codeword reports, derive alignment
 * offsets, program them, and cache the results per band and per channel
 * for later reuse by _tssi_alimentk_done().
 */
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			   enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
	static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
				      0x78e4, 0x49c0, 0x0d18, 0x0d80};
	static const s16 power_2g[4] = {48, 20, 4, -8};
	static const s16 power_5g[4] = {48, 20, 4, 4};
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
	u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
	u8 channel = chan->channel;
	u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
	struct rtw8852bx_bb_tssi_bak tssi_bak;
	s32 aliment_diff, tssi_cw_default;
	u32 bb_reg_backup[8] = {};
	ktime_t start_time;
	const s16 *power;
	s64 this_time;
	u8 band;
	bool ok;
	u32 tmp;
	u8 j;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s   channel=%d   path=%d\n", __func__, channel,
		    path);

	start_time = ktime_get();

	if (chan->band_type == RTW89_BAND_2G)
		power = power_2g;
	else
		power = power_5g;

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	/* save BB state that the measurement sequence clobbers */
	rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
	_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);

	ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
	if (!ok)
		goto out;

	for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] power[%d]=%d  tssi_cw_rpt[%d]=%d\n", j,
			    power[j], j, tssi_cw_rpt[j]);
	}

	/* offset 1: measured codeword delta vs expected power delta,
	 * referenced to the default codeword (9-bit signed fields)
	 */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
				    _tssi_cw_default_mask[1]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
			     tssi_cw_rpt[1] + tssi_cw_default;
	aliment_diff = tssi_alim_offset_1 - tssi_cw_default;

	/* offsets 2 and 3: shift their defaults by the same difference */
	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
				    _tssi_cw_default_mask[2]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_2 = tssi_cw_default + aliment_diff;

	tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
				    _tssi_cw_default_mask[3]);
	tssi_cw_default = sign_extend32(tmp, 8);
	tssi_alim_offset_3 = tssi_cw_default + aliment_diff;

	if (path == RF_PATH_A) {
		/* NOTE(review): the B_P1_TSSI_ALIM1x field masks are used for
		 * path A as well — bit layout appears shared between the P0
		 * and P1 register sets; confirm against the register map.
		 */
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
	} else {
		tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
		      FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
		      FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);

		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TSSI PA K] tssi_alim_offset = 0x%x   0x%x   0x%x   0x%x\n",
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
			    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
	}

	/* cache the programmed registers per band ... */
	tssi_info->alignment_done[path][band] = true;
	tssi_info->alignment_value[path][band][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_value[path][band][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	/* ... and per channel */
	tssi_info->check_backup_aligmk[path][ch_idx] = true;
	tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
	tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
		rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM1 + (path << 13),
		    tssi_info->alignment_value[path][band][0]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM3 + (path << 13),
		    tssi_info->alignment_value[path][band][1]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM2 + (path << 13),
		    tssi_info->alignment_value[path][band][2]);
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
		    path, band, R_P0_TSSI_ALIM4 + (path << 13),
		    tssi_info->alignment_value[path][band][3]);

out:
	/* restore the BB state saved above, successful or not */
	_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
				  ARRAY_SIZE(bb_reg_backup));
	rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
	rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);

	this_time = ktime_us_delta(ktime_get(), start_time);
	tssi_info->tssi_alimk_time += this_time;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
		    __func__, this_time, tssi_info->tssi_alimk_time);
}
3800 
rtw8852bt_dpk_init(struct rtw89_dev * rtwdev)3801 void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
3802 {
3803 	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
3804 
3805 	u8 path;
3806 
3807 	for (path = 0; path < 2; path++) {
3808 		dpk->cur_idx[path] = 0;
3809 		dpk->max_dpk_txagc[path] = 0x3F;
3810 	}
3811 
3812 	dpk->is_dpk_enable = true;
3813 	dpk->is_dpk_reload_en = false;
3814 	_set_dpd_backoff(rtwdev, RTW89_PHY_0);
3815 }
3816 
rtw8852bt_rck(struct rtw89_dev * rtwdev)3817 void rtw8852bt_rck(struct rtw89_dev *rtwdev)
3818 {
3819 	u8 path;
3820 
3821 	for (path = 0; path < RF_PATH_NUM_8852BT; path++)
3822 		_rck(rtwdev, path);
3823 }
3824 
/* Run DAC calibration, bracketed by BT-coex RFK start/stop notifications
 * so the coexistence arbiter yields the air during calibration.
 */
void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
	_dac_cal(rtwdev, false);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
3833 
/* Run IQ calibration: notify BT-coex, pause scheduled TX and wait for the
 * RF chains to settle in RX mode, calibrate, then restore TX and notify
 * coex again. The stop/resume bracket must stay balanced.
 */
void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);
	_iqk(rtwdev, phy_idx, false, chanctx_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
3850 
/* Run RX DC offset calibration with the same coex-notify / TX-pause
 * bracket as rtw8852bt_iqk().
 */
void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		      enum rtw89_chanctx_idx chanctx_idx)
{
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
	u32 tx_en;

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_rx_dck(rtwdev, phy_idx);

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
3866 
/* Run DPK on both paths, or force-bypass it when the current channel
 * configuration is not eligible for DPK.
 */
void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
		   enum rtw89_chanctx_idx chanctx_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);

	if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx))
		_dpk_force_bypass(rtwdev, phy_idx);
	else
		_dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx);
}
3878 
/* Periodic DPK thermal tracking entry point; delegates to _dpk_track(). */
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
{
	_dpk_track(rtwdev);
}
3883 
/* Full TSSI bring-up for @phy: disable TSSI, program the per-path tables
 * and slopes, optionally run hardware-TX alignment (@hwtx_en), then
 * re-enable TSSI and load the efuse DE values. The DPD registers touched
 * by the sequence are backed up and restored around it.
 */
void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
		    bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
	u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
	u32 reg_backup[2] = {};
	u32 tx_en;
	u8 i;

	_tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2);
	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);

	_tssi_dpk_off(rtwdev, phy);
	_tssi_disable(rtwdev, phy);

	for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, i, chan);
		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
		_tssi_set_dck(rtwdev, phy, i);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
		_tssi_set_dac_gain_tbl(rtwdev, phy, i);
		_tssi_slope_cal_org(rtwdev, phy, i, chan);
		_tssi_alignment_default(rtwdev, phy, i, true, chan);
		_tssi_set_tssi_slope(rtwdev, phy, i);

		/* alignment K transmits real packets, so scheduled TX must
		 * be halted around it
		 */
		rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
		_tmac_tx_pause(rtwdev, phy, true);
		if (hwtx_en)
			_tssi_alimentk(rtwdev, phy, i, chan);
		_tmac_tx_pause(rtwdev, phy, false);
		rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);

	_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);

	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
3928 
/* Lightweight TSSI re-setup on channel switch during scan: reprogram the
 * channel-dependent tables and restore cached alignment results (or the
 * defaults when the band was never calibrated), without running a new
 * alignment K.
 */
void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			 const struct rtw89_chan *chan)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 channel = chan->channel;
	u8 band;
	u32 i;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s   phy=%d  channel=%d\n", __func__, phy, channel);

	if (channel >= 1 && channel <= 14)
		band = TSSI_ALIMK_2G;
	else if (channel >= 36 && channel <= 64)
		band = TSSI_ALIMK_5GL;
	else if (channel >= 100 && channel <= 144)
		band = TSSI_ALIMK_5GM;
	else if (channel >= 149 && channel <= 177)
		band = TSSI_ALIMK_5GH;
	else
		band = TSSI_ALIMK_2G;

	_tssi_disable(rtwdev, phy);

	/* NOTE(review): loop bound is RTW8852BT_TSSI_PATH_NR here while the
	 * full bring-up uses RF_PATH_NUM_8852BT; both are 2 on this chip.
	 */
	for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
		_tssi_rf_setting(rtwdev, phy, i, chan);
		_tssi_set_sys(rtwdev, phy, i, chan);
		_tssi_set_tmeter_tbl(rtwdev, phy, i, chan);

		if (tssi_info->alignment_done[i][band])
			_tssi_alimentk_done(rtwdev, phy, i, chan);
		else
			_tssi_alignment_default(rtwdev, phy, i, true, chan);
	}

	_tssi_enable(rtwdev, phy);
	_tssi_set_efuse_to_de(rtwdev, phy, chan);
}
3967 
/* Restore the default TXAGC offset after a scan ends. Nothing is done on
 * scan start (@enable == true); on scan end the tracking offset is reset
 * to 0xc0 and latched, then the cached alignment values are reapplied.
 */
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy, bool enable,
					 enum rtw89_chanctx_idx chanctx_idx)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	u8 channel = chan->channel;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s   ch=%d\n",
		    __func__, channel);

	if (enable)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,  0xc0);
	/* toggle OFT_EN 0 -> 1 to latch the offset on each path */
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
	rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);

	_tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
	_tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
		    __func__,
		    rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
		    rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "======> %s   SCAN_END\n", __func__);
}
4006 
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
				enum rtw89_phy_idx phy_idx,
				enum rtw89_chanctx_idx chanctx_idx)
{
	/* The scan state maps directly onto the default-TXAGC enable flag,
	 * so no branching is needed.
	 */
	rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, scan_start, chanctx_idx);
}
4016 
/*
 * Program the bandwidth field of RF register 0x18 for one path.
 *
 * @dav: true writes the DAV copy (RR_CFGCH), false the DDV copy
 *       (RR_CFGCH_V1); both must be kept in sync by the caller.
 */
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			enum rtw89_bandwidth bw, bool dav)
{
	u32 rf_reg18;
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	if (rf_reg18 == INV_RF_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[RFK]Invalid RF_0x18 for Path-%d\n", path);
		return;
	}
	rf_reg18 &= ~RR_CFGCH_BW;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_5:
	case RTW89_CHANNEL_WIDTH_10:
	case RTW89_CHANNEL_WIDTH_20:
		/* 5/10 MHz widths are configured as 20 MHz at the RF level. */
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
		break;
	default:
		/* Unknown width: the BW field stays cleared; only log it. */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
	}

	/* Clear control bits that must not carry over from the read value,
	 * then force BW2 on, and trim to the 20-bit RF register width.
	 */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;
	rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
		    bw, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
4058 
_ctrl_bw(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_bandwidth bw)4059 static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4060 		     enum rtw89_bandwidth bw)
4061 {
4062 	_bw_setting(rtwdev, RF_PATH_A, bw, true);
4063 	_bw_setting(rtwdev, RF_PATH_B, bw, true);
4064 	_bw_setting(rtwdev, RF_PATH_A, bw, false);
4065 	_bw_setting(rtwdev, RF_PATH_B, bw, false);
4066 }
4067 
/*
 * Write RF register 0x18 on S0 with the LCK trigger armed, then wait for
 * the synthesizer to settle.
 *
 * Returns true when the busy poll timed out, false on success.
 */
static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
{
	u32 tmp;
	int ret;

	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);

	/* Poll the LPF busy flag every 1 us, for at most 1 ms. */
	ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
				       false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");

	/* Disarm the trigger regardless of the poll outcome. */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	return !!ret;
}
4084 
/*
 * Verify the S0 synthesizer lock and apply up to three escalating recovery
 * steps, re-checking RR_SYNFB_LK after each one:
 *   1. reset the SYN MMD block,
 *   2. rewrite RF 0x18 with its current value to retrigger locking,
 *   3. power-cycle the synthesizer, then retrigger locking again.
 */
static void _lck_check(struct rtw89_dev *rtwdev)
{
	u32 tmp;

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");

		/* Pulse the MMD reset: enable, toggle SYN reset, disable. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
	}

	/* Give the synthesizer time to re-lock before re-checking. */
	udelay(10);

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");

		/* Rewrite RF 0x18 with its current value to retrigger LCK. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}

	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");

		/* Read-back/write of POW and SX refreshes both registers. */
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);

		/* Power-cycle the synthesizer under LUT-mode protection. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);

		/* Retrigger locking with the current RF 0x18 value. */
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		_set_s0_arfc18(rtwdev, tmp);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
			    rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
	}
}
4132 
static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
{
	bool timed_out;
	u32 ldo_bak;

	/* Force the LDO select while programming RF 0x18, then restore it. */
	ldo_bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
	timed_out = _set_s0_arfc18(rtwdev, val);
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, ldo_bak);

	/* NOTE(review): lock status is verified only when the busy poll
	 * completed in time; a timeout skips _lck_check() — confirm intent.
	 */
	if (!timed_out)
		_lck_check(rtwdev);
}
4145 
/*
 * Program the channel and band fields of RF register 0x18 for one path.
 *
 * @dav: true writes the DAV copy (RR_CFGCH), false the DDV copy
 *       (RR_CFGCH_V1). The S0/DAV combination goes through _set_ch() so
 *       the synthesizer lock is supervised.
 */
static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
			u8 central_ch, bool dav)
{
	u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
	bool is_2g_ch = central_ch <= 14;
	u32 rf_reg18;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);

	rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
		      RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);

	/* Band fields stay cleared (2G) unless this is a 5G channel. */
	if (!is_2g_ch)
		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
			    FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);

	/* Clear control bits again, force BW2 on, trim to register width. */
	rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
		      RR_CFGCH_BW2) & RFREG_MASK;
	rf_reg18 |= RR_CFGCH_BW2;

	if (path == RF_PATH_A && dav)
		_set_ch(rtwdev, rf_reg18);
	else
		rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);

	/* Pulse LCKST (0 -> 1) to restart lock-status tracking. */
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
	rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
		    central_ch, path, reg18_addr,
		    rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
}
4181 
static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
{
	u8 i;

	/* Program the channel on both RF paths: DAV copy first (i == 0),
	 * then the DDV copy, matching the required write order.
	 */
	for (i = 0; i < 2; i++) {
		bool dav = (i == 0);

		_ch_setting(rtwdev, RF_PATH_A, central_ch, dav);
		_ch_setting(rtwdev, RF_PATH_B, central_ch, dav);
	}
}
4189 
_set_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_bandwidth bw,enum rtw89_rf_path path)4190 static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
4191 			 enum rtw89_rf_path path)
4192 {
4193 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
4194 	rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
4195 
4196 	if (bw == RTW89_CHANNEL_WIDTH_20)
4197 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
4198 	else if (bw == RTW89_CHANNEL_WIDTH_40)
4199 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
4200 	else if (bw == RTW89_CHANNEL_WIDTH_80)
4201 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
4202 	else
4203 		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
4204 
4205 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n",
4206 		    path, rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
4207 
4208 	rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
4209 }
4210 
_rxbb_bw(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,enum rtw89_bandwidth bw)4211 static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4212 		     enum rtw89_bandwidth bw)
4213 {
4214 	u8 kpath, path;
4215 
4216 	kpath = _kpath(rtwdev, phy);
4217 
4218 	for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
4219 		if (!(kpath & BIT(path)))
4220 			continue;
4221 
4222 		_set_rxbb_bw(rtwdev, bw, path);
4223 	}
4224 }
4225 
/*
 * Configure channel, RF bandwidth and RX baseband bandwidth, in that
 * order. @band is accepted for interface symmetry but not used here.
 */
static void rtw8852bt_ctrl_bw_ch(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy, u8 central_ch,
				 enum rtw89_band band, enum rtw89_bandwidth bw)
{
	_ctrl_ch(rtwdev, central_ch);
	_ctrl_bw(rtwdev, phy, bw);
	_rxbb_bw(rtwdev, phy, bw);
}
4234 
rtw8852bt_set_channel_rf(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,enum rtw89_phy_idx phy_idx)4235 void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
4236 			      const struct rtw89_chan *chan,
4237 			      enum rtw89_phy_idx phy_idx)
4238 {
4239 	rtw8852bt_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
4240 			     chan->band_width);
4241 }
4242 
rtw8852bt_mcc_get_ch_info(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)4243 void rtw8852bt_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
4244 {
4245 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, 0);
4246 	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
4247 	struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
4248 	u8 idx;
4249 
4250 	for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
4251 		struct rtw89_rfk_chan_desc *p = &desc[idx];
4252 
4253 		p->ch = rfk_mcc->ch[idx];
4254 
4255 		p->has_band = true;
4256 		p->band = rfk_mcc->band[idx];
4257 	}
4258 
4259 	idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
4260 
4261 	rfk_mcc->ch[idx] = chan->channel;
4262 	rfk_mcc->band[idx] = chan->band_type;
4263 	rfk_mcc->table_idx = idx;
4264 }
4265 
/*
 * Channel-context state callback: DPK is disabled while multi-channel
 * concurrency (MCC) runs and re-run once MCC stops.
 */
void rtw8852bt_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
			      enum rtw89_chanctx_state state)
{
	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
	u8 path;

	switch (state) {
	case RTW89_CHANCTX_STATE_MCC_START:
		/* DPK results are per-channel; turn it off for MCC. */
		dpk->is_dpk_enable = false;
		for (path = 0; path < RTW8852BT_SS; path++)
			_dpk_onoff(rtwdev, path, false);
		break;
	case RTW89_CHANCTX_STATE_MCC_STOP:
		dpk->is_dpk_enable = true;
		/* NOTE(review): paths are switched off (off_reverse=false
		 * presumably) before rtw8852bt_dpk() recalibrates and
		 * re-enables them — confirm against _dpk_onoff()'s contract.
		 */
		for (path = 0; path < RTW8852BT_SS; path++)
			_dpk_onoff(rtwdev, path, false);
		rtw8852bt_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
		break;
	default:
		break;
	}
}
4288