xref: /linux/drivers/net/wireless/realtek/rtw89/phy.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "acpi.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "ps.h"
13 #include "reg.h"
14 #include "sar.h"
15 #include "txrx.h"
16 #include "util.h"
17 
/* Return the register-address offset between PHY0 and PHY1 for @addr,
 * delegating to the chip-generation specific callback in phy_def.
 */
static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;

	return phy->phy0_phy1_offset(rtwdev, addr);
}
24 
25 static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
26 			     const struct rtw89_ra_report *report)
27 {
28 	u32 bit_rate = report->bit_rate;
29 
30 	/* lower than ofdm, do not aggregate */
31 	if (bit_rate < 550)
32 		return 1;
33 
34 	/* avoid AMSDU for legacy rate */
35 	if (report->might_fallback_legacy)
36 		return 1;
37 
38 	/* lower than 20M vht 2ss mcs8, make it small */
39 	if (bit_rate < 1800)
40 		return 1200;
41 
42 	/* lower than 40M vht 2ss mcs9, make it medium */
43 	if (bit_rate < 4000)
44 		return 2600;
45 
46 	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
47 	if (bit_rate < 7000)
48 		return 3500;
49 
50 	return rtwdev->chip->max_amsdu_limit;
51 }
52 
53 static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
54 {
55 	u64 ra_mask = 0;
56 	u8 mcs_cap;
57 	int i, nss;
58 
59 	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
60 		mcs_cap = mcs_map & 0x3;
61 		switch (mcs_cap) {
62 		case 2:
63 			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
64 			break;
65 		case 1:
66 			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
67 			break;
68 		case 0:
69 			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
70 			break;
71 		default:
72 			break;
73 		}
74 	}
75 
76 	return ra_mask;
77 }
78 
/* Build the HE rate-adaptation mask from the peer's RX MCS map for its
 * current operating bandwidth (80+80 vs 160 vs <= 80 MHz).
 */
static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
	u16 mcs_map;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		/* 80+80 capable peers advertise a dedicated MCS map */
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* MCS11, MCS9, MCS7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}
99 
/* Expand per-MCS-range EHT NSS bytes into a 64-bit RA rate mask.
 *
 * @max_nss: array of @n_nss bytes; entry i holds the RX NSS supported for
 *           the MCS range whose top MCS is @start_mcs + 2 * i
 * @start_mcs: highest MCS of the first (lowest) range
 * @n_nss: number of MCS-range entries to process
 *
 * For each range, every supported spatial stream contributes the bits
 * MCS 0..top; NSS j's rate bits start at bit 12 + 16 * j.
 */
static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
{
	u64 nss_mcs_shift;
	u64 nss_mcs_val;
	u64 mask = 0;
	int i, j;
	u8 nss;

	for (i = 0; i < n_nss; i++) {
		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
		if (!nss)
			continue;

		/* all MCS up to this range's top MCS */
		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);

		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
			mask |= nss_mcs_val << nss_mcs_shift;
	}

	return mask;
}
121 
/* Build the EHT rate-adaptation mask from the peer's advertised EHT
 * MCS/NSS capability set matching its current operating bandwidth.
 */
static u64 get_eht_ra_mask(struct rtw89_vif_link *rtwvif_link,
			   struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_320:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_160:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_20:
		/* a 20 MHz-only client (no HE channel-width bits set)
		 * advertises the dedicated only_20mhz MCS/NSS set
		 */
		if (vif->type == NL80211_IFTYPE_AP &&
		    !(he_phy_cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
			/* MCS 7, 9, 11, 13 */
			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
		}
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
	default:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	}
}
155 
156 #define RA_FLOOR_TABLE_SIZE	7
157 #define RA_FLOOR_UP_GAP		3
158 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
159 				  u8 ratr_state)
160 {
161 	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
162 	u8 rssi_lv = 0;
163 	u8 i;
164 
165 	rssi >>= 1;
166 	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
167 		if (i >= ratr_state)
168 			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
169 		if (rssi < rssi_lv_t[i]) {
170 			rssi_lv = i;
171 			break;
172 		}
173 	}
174 	if (rssi_lv == 0)
175 		return 0xffffffffffffffffULL;
176 	else if (rssi_lv == 1)
177 		return 0xfffffffffffffff0ULL;
178 	else if (rssi_lv == 2)
179 		return 0xffffffffffffefe0ULL;
180 	else if (rssi_lv == 3)
181 		return 0xffffffffffffcfc0ULL;
182 	else if (rssi_lv == 4)
183 		return 0xffffffffffff8f80ULL;
184 	else if (rssi_lv >= 5)
185 		return 0xffffffffffff0f00ULL;
186 
187 	return 0xffffffffffffffffULL;
188 }
189 
190 static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
191 {
192 	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
193 		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
194 
195 	if (ra_mask == 0)
196 		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
197 
198 	return ra_mask;
199 }
200 
/* Translate the user-configured cfg80211 bitrate mask into an RA rate mask
 * for the current band, combining the legacy part with the highest rate
 * family (EHT > HE > VHT > HT) the peer supports.
 *
 * Returns an all-ones mask (-1 cast to u64) when no user mask is active or
 * the band is unknown, so ANDing the result is then a no-op.
 */
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link,
				 struct ieee80211_link_sta *link_sta,
				 const struct rtw89_chan *chan)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta_link->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		/* 2 GHz legacy covers both CCK and OFDM rate bits */
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	/* only the best rate family the peer advertises is configurable */
	if (link_sta->eht_cap.has_eht) {
		cfg_mask |= u64_encode_bits(mask->control[band].eht_mcs[0],
					    RA_MASK_EHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].eht_mcs[1],
					    RA_MASK_EHT_2SS_RATES);
	} else if (link_sta->he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (link_sta->vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (link_sta->ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}
258 
/* Per-family high-rate masks indexed by spatial-stream count minus one;
 * used to limit the RA mask to the locally supported TX NSS.
 */
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
/* EHT variant capped at MCS 11 for HALs that cannot do MCS 12/13 */
static const u64
rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
				RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};
274 
/* Choose the fixed GI/LTF configuration for rate adaptation.
 *
 * Picks a chip/bandwidth dependent default, then — if the user configured
 * an explicit HE/EHT GI and LTF through the bitrate mask — translates that
 * (ltf, gi) pair into the matching RTW89_GILTF_* value.  Unrecognized
 * (ltf, gi) combinations keep the default.
 */
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				struct ieee80211_link_sta *link_sta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 ltf, gi;

	*fix_giltf_en = true;

	/* 8852C at 160 MHz with an SU/MU 4x HE-LTF 0.8us capable peer
	 * prefers that combination; otherwise default to 2x HE-LTF 0.8us
	 */
	if (rtwdev->chip->chip_id == RTL8852C &&
	    chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta))
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else
		*fix_giltf = RTW89_GILTF_2XHE08;

	if (!rtwsta_link->use_cfg_mask)
		return;

	if (link_sta->eht_cap.has_eht) {
		ltf = mask->control[nl_band].eht_ltf;
		gi = mask->control[nl_band].eht_gi;
	} else if (link_sta->he_cap.has_he) {
		ltf = mask->control[nl_band].he_ltf;
		gi = mask->control[nl_band].he_gi;
	} else {
		return;
	}

	/* map the cfg80211 (ltf, gi) selection onto hardware GILTF codes */
	if (ltf == 2 && gi == 2)
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	else if (ltf == 2 && gi == 0)
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else if (ltf == 1 && gi == 1)
		*fix_giltf = RTW89_GILTF_2XHE16;
	else if (ltf == 1 && gi == 0)
		*fix_giltf = RTW89_GILTF_2XHE08;
	else if (ltf == 0 && gi == 1)
		*fix_giltf = RTW89_GILTF_1XHE16;
	else if (ltf == 0 && gi == 0)
		*fix_giltf = RTW89_GILTF_1XHE08;
}
321 
/* Rebuild the rate-adaptation (RA) parameters for one STA link.
 *
 * Derives the allowed rate mask and capability flags (mode, STBC/LDPC,
 * GI/LTF, bandwidth, NSS) from the peer's HT/VHT/HE/EHT capabilities, the
 * current channel, an RSSI-based rate floor and any user-configured bitrate
 * mask, and stores the result in rtwsta_link->ra for rtw89_fw_h2c_ra().
 * When @csi is true the CSI reporting rate fields are initialized as well.
 */
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
				    struct ieee80211_link_sta *link_sta,
				    bool p2p, bool csi)
{
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (link_sta->eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(rtwvif_link, link_sta);

		if (rtwdev->hal.no_mcs_12_13)
			high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
		else
			high_rate_masks = rtw89_ra_mask_eht_rates;

		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9 (non-20MHz), MCS8, MCS7 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
		else
			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (link_sta->ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		/* HT rx_mask is one byte per NSS; NSS n's rate bits start
		 * at bit 12 * (n + 1) in the RA mask
		 */
		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	/* add the legacy rates supported on the current band */
	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	/* keep a copy so an over-filtered mask can be recovered below */
	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;
		/* limit high rates to the locally supported TX NSS */
		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	/* apply the RSSI rate floor unless the peer is CCK-only */
	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	/* a fixed rate pattern (set via bitrate mask) overrides the
	 * capability-derived mask, except for P2P links
	 */
	if (rate_pattern->enable && !p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta_link->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta_link->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;
	ra->partial_bw_er = link_sta->he_cap.has_he ?
			    !!(link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
			       IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE) : 0;
	ra->band = chan->band_type;

	if (!csi)
		return;

	/* CSI reporting rate configuration */
	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif_link->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}
504 
/* Recompute RA parameters for one STA link and push them to firmware.
 * @changed is a bitmap of IEEE80211_RC_* flags describing what triggered
 * the update; it selects which firmware update masks are set.
 */
void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
				  struct rtw89_sta_link *rtwsta_link,
				  u32 changed)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, false);

	rcu_read_unlock();

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}
537 
538 void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
539 			     u32 changed)
540 {
541 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
542 	struct rtw89_sta_link *rtwsta_link;
543 	unsigned int link_id;
544 
545 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
546 		rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
547 }
548 
549 static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
550 				 u16 rate_base, u64 ra_mask, u8 ra_mode,
551 				 u32 rate_ctrl, u32 ctrl_skip, bool force)
552 {
553 	u8 n, c;
554 
555 	if (rate_ctrl == ctrl_skip)
556 		return true;
557 
558 	n = hweight32(rate_ctrl);
559 	if (n == 0)
560 		return true;
561 
562 	if (force && n != 1)
563 		return false;
564 
565 	if (next->enable)
566 		return false;
567 
568 	c = __fls(rate_ctrl);
569 	next->rate = rate_base + c;
570 	next->ra_mode = ra_mode;
571 	next->ra_mask = ra_mask;
572 	next->enable = true;
573 
574 	return true;
575 }
576 
/* Rate bases that do not exist on a chip generation resolve to
 * RTW89_HW_RATE_INVAL so __rtw89_phy_rate_pattern_vif() can reject them.
 */
enum __rtw89_hw_rate_invalid_bases {
	/* no EHT rate for ax chip */
	RTW89_HW_RATE_EHT_NSS1_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS2_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS3_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS4_MCS0 = RTW89_HW_RATE_INVAL,
};

/* Expand a logical rate name into its per-chip-generation HW rate codes */
#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}
590 
/* Translate a cfg80211 bitrate mask into a fixed rate pattern for one link.
 *
 * Walks the rate families from newest to oldest (EHT where supported, then
 * HE, VHT, HT, finally legacy) and per spatial stream; the first field that
 * pins a rate wins.  On success the pattern is stored in the link; if the
 * mask selects everything, or an invalid/forced-multi selection is found,
 * the pattern is disabled instead.
 */
static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	static const u16 hw_rate_eht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS4_MCS0),
	};
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	/* AX chips have no EHT rates at all, skip that family */
	if (chip_gen == RTW89_CHIP_AX)
		goto rs_11ax;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_eht[i][chip_gen],
					  RA_MASK_EHT_RATES, RTW89_RA_MODE_EHT,
					  mask->control[nl_band].eht_mcs[i],
					  0, true))
			goto out;

rs_11ax:
	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * require at least one basic rate for ieee80211_set_bitrate_mask,
	 * so the decision just depends on if all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	/* the chosen base may be an invalid placeholder (see
	 * __rtw89_hw_rate_invalid_bases) — reject it
	 */
	if (unlikely(next_pattern.rate >= RTW89_HW_RATE_INVAL)) {
		rtw89_debug(rtwdev, RTW89_DBG_RA,
			    "pattern invalid target: chip_gen %d, mode 0x%x\n",
			    chip_gen, next_pattern.ra_mode);
		goto out;
	}

	rtwvif_link->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif_link->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}
704 
705 void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
706 				struct ieee80211_vif *vif,
707 				const struct cfg80211_bitrate_mask *mask)
708 {
709 	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
710 	struct rtw89_vif_link *rtwvif_link;
711 	unsigned int link_id;
712 
713 	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
714 		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
715 }
716 
717 static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
718 {
719 	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
720 
721 	rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
722 }
723 
/* Re-evaluate rate adaptation for all known stations */
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}
730 
/* Initialize rate adaptation for a newly associated STA link.
 *
 * Builds the full RA parameter set, picks an initial rate level from the
 * averaged RSSI (higher RSSI -> lower level number -> faster start), and
 * pushes everything to firmware; CSI rates are configured if the peer is
 * beamforming capable.
 */
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
	struct ieee80211_link_sta *link_sta;
	bool csi;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
	csi = rtw89_sta_has_beamformer_cap(link_sta);

	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, csi);

	rcu_read_unlock();

	/* map RSSI (dB) to the firmware's initial rate level */
	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}
777 
/* Recompute the hardware RA aggregation limit from the smallest negotiated
 * A-MPDU agg_num across all stations and TIDs, and program it into both
 * MAC bands when DBCC is enabled.  With no active BA sessions the register
 * is set to the 0x3F default.
 */
void rtw89_phy_ra_recalc_agg_limit(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	const struct rtw89_reg_def *ra_limit = &mac->ra_agg_limit;
	struct ieee80211_sta *sta;
	struct rtw89_sta *rtwsta;
	u16 agg_num = U16_MAX;	/* sentinel: no session found yet */
	u8 tid;

	for_each_station(sta, rtwdev->hw) {
		rtwsta = sta_to_rtwsta(sta);

		for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS)
			agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
	}

	if (agg_num == U16_MAX)
		agg_num = 0x3F;
	else
		/* register holds agg_num - 1
		 * NOTE(review): clamp() mixes u16 with int constants — the
		 * kernel's type-checked clamp() may warn/fail here; confirm
		 * whether clamp_t(u16, ...) was intended, and whether the
		 * upper bound of 256 fits the register field width.
		 */
		agg_num = clamp(agg_num, 1, 256) - 1;

	rtw89_write32_idx(rtwdev, ra_limit->addr, ra_limit->mask, agg_num, RTW89_MAC_0);
	if (!rtwdev->dbcc_en)
		return;
	rtw89_write32_idx(rtwdev, ra_limit->addr, ra_limit->mask, agg_num, RTW89_MAC_1);
}
804 
/* Compute the TX sub-channel (TXSC) index for transmitting at data
 * bandwidth @dbw inside the configured channel bandwidth.
 *
 * Returns 0 when the data bandwidth fills the channel (or the channel is
 * 20 MHz), an encoding-specific index otherwise, or 0xff for an impossible
 * primary-channel layout at 160 MHz.
 */
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		/* upper or lower 20 MHz half */
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			/* 20 MHz slot derived from primary-channel offset */
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			/* 40 MHz half of an 80 MHz channel */
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			/* map the 20 MHz slot to its 40 MHz container */
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;	/* invalid primary layout */
		} else {
			/* 80 MHz half of a 160 MHz channel */
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
874 
/* Compute the TX sub-band (TXSB) index — the BE-generation equivalent of
 * the TXSC — for data bandwidth @dbw inside the configured channel
 * bandwidth.  The index counts @dbw-sized slots from the low edge of the
 * channel, derived from the primary-channel offset; 0 when @dbw fills the
 * channel or the channel is 20 MHz.
 */
u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);
921 
922 static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
923 {
924 	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
925 	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
926 }
927 
928 u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
929 		      u32 addr, u32 mask)
930 {
931 	const struct rtw89_chip_info *chip = rtwdev->chip;
932 	const u32 *base_addr = chip->rf_base_addr;
933 	u32 val, direct_addr;
934 
935 	if (rf_path >= rtwdev->chip->rf_path_num) {
936 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
937 		return INV_RF_DATA;
938 	}
939 
940 	addr &= 0xff;
941 	direct_addr = base_addr[rf_path] + (addr << 2);
942 	mask &= RFREG_MASK;
943 
944 	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);
945 
946 	return val;
947 }
948 EXPORT_SYMBOL(rtw89_phy_read_rf);
949 
/* Indirect RF register read through the software serial interface (SWSI):
 * wait for the interface to go idle, issue the read request, then poll for
 * the data-done flag and fetch the result.  Returns INV_RF_DATA on timeout.
 */
static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	/* wait up to 30us for any in-flight SWSI transaction to finish */
	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	/* small settle delay before polling for completion */
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		/* suppress the error if the device was unplugged mid-read */
		if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
			rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}
983 
984 u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
985 			 u32 addr, u32 mask)
986 {
987 	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
988 
989 	if (rf_path >= rtwdev->chip->rf_path_num) {
990 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
991 		return INV_RF_DATA;
992 	}
993 
994 	if (ad_sel)
995 		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
996 	else
997 		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
998 }
999 EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
1000 
/* Read one full RF register via the HWSI interface (v2 chips).
 * Returns the register value, or INV_RF_DATA on timeout.
 */
static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	/* per-path HWSI status and address/control register addresses */
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	/* enable polling mode, then wait until HWSI is idle */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	/* latch the target address and trigger the read */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	/* wait for the read-done flag */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
	/* leave polling mode regardless of outcome */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}
1038 
1039 static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
1040 				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
1041 {
1042 	u32 val;
1043 
1044 	val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
1045 
1046 	return (val & mask) >> __ffs(mask);
1047 }
1048 
1049 u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1050 			 u32 addr, u32 mask)
1051 {
1052 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1053 
1054 	if (rf_path >= rtwdev->chip->rf_path_num) {
1055 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1056 		return INV_RF_DATA;
1057 	}
1058 
1059 	if (ad_sel)
1060 		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
1061 	else
1062 		return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
1063 }
1064 EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
1065 
/* Read one full RF register via the BE4 software SI.
 * Returns the register value, or INV_RF_DATA on timeout.
 */
static u32 rtw89_phy_read_full_rf_v3_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	bool done;
	u32 busy;
	int ret;
	u32 val;

	/* wait until neither a write nor a read is in flight */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 30, false,
				       rtwdev, R_SW_SI_DATA_BE4,
				       B_SW_SI_W_BUSY_BE4 | B_SW_SI_R_BUSY_BE4);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	/* encode RF path (bits 10..8) and register address (bits 7..0) */
	val = u32_encode_bits(rf_path, GENMASK(10, 8)) |
	      u32_encode_bits(addr, GENMASK(7, 0));

	rtw89_phy_write32_mask(rtwdev, R_SW_SI_READ_ADDR_BE4, B_SW_SI_READ_ADDR_BE4, val);

	/* wait for the read to complete */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 30, false,
				       rtwdev, R_SW_SI_DATA_BE4, B_SW_SI_READ_DATA_DONE_BE4);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		return INV_RF_DATA;
	}

	val = rtw89_phy_read32_mask(rtwdev, R_SW_SI_DATA_BE4, B_SW_SI_READ_DATA_BE4);

	return val;
}
1100 
1101 static u32 rtw89_phy_read_rf_v3_a(struct rtw89_dev *rtwdev,
1102 				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
1103 {
1104 	u32 val;
1105 
1106 	val = rtw89_phy_read_full_rf_v3_a(rtwdev, rf_path, addr);
1107 
1108 	return (val & mask) >> __ffs(mask);
1109 }
1110 
1111 u32 rtw89_phy_read_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1112 			 u32 addr, u32 mask)
1113 {
1114 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1115 
1116 	if (rf_path >= rtwdev->chip->rf_path_num) {
1117 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1118 		return INV_RF_DATA;
1119 	}
1120 
1121 	if (ad_sel)
1122 		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
1123 	else
1124 		return rtw89_phy_read_rf_v3_a(rtwdev, rf_path, addr, mask);
1125 }
1126 EXPORT_SYMBOL(rtw89_phy_read_rf_v3);
1127 
1128 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1129 			u32 addr, u32 mask, u32 data)
1130 {
1131 	const struct rtw89_chip_info *chip = rtwdev->chip;
1132 	const u32 *base_addr = chip->rf_base_addr;
1133 	u32 direct_addr;
1134 
1135 	if (rf_path >= rtwdev->chip->rf_path_num) {
1136 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1137 		return false;
1138 	}
1139 
1140 	addr &= 0xff;
1141 	direct_addr = base_addr[rf_path] + (addr << 2);
1142 	mask &= RFREG_MASK;
1143 
1144 	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);
1145 
1146 	/* delay to ensure writing properly */
1147 	udelay(1);
1148 
1149 	return true;
1150 }
1151 EXPORT_SYMBOL(rtw89_phy_write_rf);
1152 
/* Write an RF register through the SWSI indirect interface.
 * For a sub-field write (mask != RFREG_MASK), the bit-mask register is
 * programmed first so the hardware performs the read-modify-write.
 */
static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	/* wait for the interface to go idle before issuing the write */
	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		/* align data to the field position within the register */
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	/* writing the data register triggers the SWSI transaction */
	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}
1189 
1190 bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1191 			   u32 addr, u32 mask, u32 data)
1192 {
1193 	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
1194 
1195 	if (rf_path >= rtwdev->chip->rf_path_num) {
1196 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1197 		return false;
1198 	}
1199 
1200 	if (ad_sel)
1201 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1202 	else
1203 		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
1204 }
1205 EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
1206 
/* Write a full value to an RF register via HWSI (v2 chips). */
static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	/* per-path HWSI idle-status and write-data register addresses */
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	/* NOTE(review): BIT(29) appears to be the same flag the read path
	 * calls B_HWSI_VAL_BUSY - confirm and use the named macro if so.
	 */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
	      u32_encode_bits(data, B_HWSI_DATA_VAL);

	/* writing the combined word triggers the HWSI write */
	rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);

	return true;
}
1232 
1233 static
1234 bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1235 			     u32 addr, u32 mask, u32 data)
1236 {
1237 	u32 val;
1238 
1239 	if (mask == RFREG_MASK) {
1240 		val = data;
1241 	} else {
1242 		val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
1243 		val &= ~mask;
1244 		val |= (data << __ffs(mask)) & mask;
1245 	}
1246 
1247 	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
1248 }
1249 
1250 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1251 			   u32 addr, u32 mask, u32 data)
1252 {
1253 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1254 
1255 	if (rf_path >= rtwdev->chip->rf_path_num) {
1256 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1257 		return INV_RF_DATA;
1258 	}
1259 
1260 	if (ad_sel)
1261 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1262 	else
1263 		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
1264 }
1265 EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
1266 
/* Write a full value to an RF register via the BE4 software SI. */
static
bool rtw89_phy_write_full_rf_v3_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	u32 busy;
	u32 val;
	int ret;

	/* wait until neither a write nor a read is in flight */
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 30, false,
				       rtwdev, R_SW_SI_DATA_BE4,
				       B_SW_SI_W_BUSY_BE4 | B_SW_SI_R_BUSY_BE4);
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	val = u32_encode_bits(rf_path, B_SW_SI_DATA_PATH_BE4) |
	      u32_encode_bits(addr, B_SW_SI_DATA_ADR_BE4) |
	      u32_encode_bits(data, B_SW_SI_DATA_DAT_BE4);

	/* writing the combined word triggers the transaction */
	rtw89_phy_write32(rtwdev, R_SW_SI_WDATA_BE4, val);

	return true;
}
1292 
1293 static
1294 bool rtw89_phy_write_rf_a_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1295 			     u32 addr, u32 mask, u32 data)
1296 {
1297 	u32 val;
1298 
1299 	if (mask == RFREG_MASK) {
1300 		val = data;
1301 	} else {
1302 		val = rtw89_phy_read_full_rf_v3_a(rtwdev, rf_path, addr);
1303 		val &= ~mask;
1304 		val |= (data << __ffs(mask)) & mask;
1305 	}
1306 
1307 	return rtw89_phy_write_full_rf_v3_a(rtwdev, rf_path, addr, val);
1308 }
1309 
1310 bool rtw89_phy_write_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1311 			   u32 addr, u32 mask, u32 data)
1312 {
1313 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1314 
1315 	if (rf_path >= rtwdev->chip->rf_path_num) {
1316 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1317 		return INV_RF_DATA;
1318 	}
1319 
1320 	if (ad_sel)
1321 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1322 	else
1323 		return rtw89_phy_write_rf_a_v3(rtwdev, rf_path, addr, mask, data);
1324 }
1325 EXPORT_SYMBOL(rtw89_phy_write_rf_v3);
1326 
/* True when this chip uses the v1 RF ops (SWSI-based indirect access) */
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}
1331 
1332 static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
1333 				 enum rtw89_phy_idx phy_idx)
1334 {
1335 	const struct rtw89_chip_info *chip = rtwdev->chip;
1336 
1337 	chip->ops->bb_reset(rtwdev, phy_idx);
1338 }
1339 
1340 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev)
1341 {
1342 	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
1343 	if (rtwdev->dbcc_en)
1344 		__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1);
1345 }
1346 
1347 static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
1348 				    const struct rtw89_reg2_def *reg,
1349 				    enum rtw89_rf_path rf_path,
1350 				    void *extra_data)
1351 {
1352 	u32 addr;
1353 
1354 	if (reg->addr == 0xfe) {
1355 		mdelay(50);
1356 	} else if (reg->addr == 0xfd) {
1357 		mdelay(5);
1358 	} else if (reg->addr == 0xfc) {
1359 		mdelay(1);
1360 	} else if (reg->addr == 0xfb) {
1361 		udelay(50);
1362 	} else if (reg->addr == 0xfa) {
1363 		udelay(5);
1364 	} else if (reg->addr == 0xf9) {
1365 		udelay(1);
1366 	} else if (reg->data == BYPASS_CR_DATA) {
1367 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
1368 	} else {
1369 		addr = reg->addr;
1370 
1371 		if ((uintptr_t)extra_data == RTW89_PHY_1)
1372 			addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
1373 
1374 		rtw89_phy_write32(rtwdev, addr, reg->data);
1375 	}
1376 }
1377 
/* Decodes a bb-gain table entry's "address" word, which packs metadata
 * fields rather than holding a real register address.
 */
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;	/* sub-type within a cfg_type */
			struct {
				u8 rxsc_start:4;	/* first RX sub-channel index */
				u8 bw:4;		/* bandwidth selector */
			};
		};
		u8 path;	/* RF path index */
		u8 gain_band;	/* gain band index */
		u8 cfg_type;	/* selects which parser handles the data */
	};
} __packed;
1393 
1394 static void
1395 rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
1396 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1397 {
1398 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1399 	u8 type = arg.type;
1400 	u8 path = arg.path;
1401 	u8 gband = arg.gain_band;
1402 	int i;
1403 
1404 	switch (type) {
1405 	case 0:
1406 		for (i = 0; i < 4; i++, data >>= 8)
1407 			gain->lna_gain[gband][path][i] = data & 0xff;
1408 		break;
1409 	case 1:
1410 		for (i = 4; i < 7; i++, data >>= 8)
1411 			gain->lna_gain[gband][path][i] = data & 0xff;
1412 		break;
1413 	case 2:
1414 		for (i = 0; i < 2; i++, data >>= 8)
1415 			gain->tia_gain[gband][path][i] = data & 0xff;
1416 		break;
1417 	default:
1418 		rtw89_warn(rtwdev,
1419 			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
1420 			   arg.addr, data, type);
1421 		break;
1422 	}
1423 }
1424 
/* Starting indices into the rpl_ofst_* arrays for each group of RX
 * sub-channel offsets parsed from the bb-gain table.
 */
enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};
1432 
1433 static void
1434 rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
1435 			  union rtw89_phy_bb_gain_arg arg, u32 data)
1436 {
1437 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1438 	u8 rxsc_start = arg.rxsc_start;
1439 	u8 bw = arg.bw;
1440 	u8 path = arg.path;
1441 	u8 gband = arg.gain_band;
1442 	u8 rxsc;
1443 	s8 ofst;
1444 	int i;
1445 
1446 	switch (bw) {
1447 	case RTW89_CHANNEL_WIDTH_20:
1448 		gain->rpl_ofst_20[gband][path] = (s8)data;
1449 		break;
1450 	case RTW89_CHANNEL_WIDTH_40:
1451 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1452 			gain->rpl_ofst_40[gband][path][0] = (s8)data;
1453 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1454 			for (i = 0; i < 2; i++, data >>= 8) {
1455 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1456 				ofst = (s8)(data & 0xff);
1457 				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
1458 			}
1459 		}
1460 		break;
1461 	case RTW89_CHANNEL_WIDTH_80:
1462 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1463 			gain->rpl_ofst_80[gband][path][0] = (s8)data;
1464 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1465 			for (i = 0; i < 4; i++, data >>= 8) {
1466 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1467 				ofst = (s8)(data & 0xff);
1468 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1469 			}
1470 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1471 			for (i = 0; i < 2; i++, data >>= 8) {
1472 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1473 				ofst = (s8)(data & 0xff);
1474 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1475 			}
1476 		}
1477 		break;
1478 	case RTW89_CHANNEL_WIDTH_160:
1479 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1480 			gain->rpl_ofst_160[gband][path][0] = (s8)data;
1481 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1482 			for (i = 0; i < 4; i++, data >>= 8) {
1483 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1484 				ofst = (s8)(data & 0xff);
1485 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1486 			}
1487 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
1488 			for (i = 0; i < 4; i++, data >>= 8) {
1489 				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
1490 				ofst = (s8)(data & 0xff);
1491 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1492 			}
1493 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1494 			for (i = 0; i < 4; i++, data >>= 8) {
1495 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1496 				ofst = (s8)(data & 0xff);
1497 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1498 			}
1499 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
1500 			for (i = 0; i < 2; i++, data >>= 8) {
1501 				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
1502 				ofst = (s8)(data & 0xff);
1503 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1504 			}
1505 		}
1506 		break;
1507 	default:
1508 		rtw89_warn(rtwdev,
1509 			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
1510 			   arg.addr, data, bw);
1511 		break;
1512 	}
1513 }
1514 
1515 static void
1516 rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
1517 			     union rtw89_phy_bb_gain_arg arg, u32 data)
1518 {
1519 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1520 	u8 type = arg.type;
1521 	u8 path = arg.path;
1522 	u8 gband = arg.gain_band;
1523 	int i;
1524 
1525 	switch (type) {
1526 	case 0:
1527 		for (i = 0; i < 4; i++, data >>= 8)
1528 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1529 		break;
1530 	case 1:
1531 		for (i = 4; i < 7; i++, data >>= 8)
1532 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1533 		break;
1534 	default:
1535 		rtw89_warn(rtwdev,
1536 			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
1537 			   arg.addr, data, type);
1538 		break;
1539 	}
1540 }
1541 
1542 static void
1543 rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
1544 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1545 {
1546 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1547 	u8 type = arg.type;
1548 	u8 path = arg.path;
1549 	u8 gband = arg.gain_band;
1550 	int i;
1551 
1552 	switch (type) {
1553 	case 0:
1554 		for (i = 0; i < 4; i++, data >>= 8)
1555 			gain->lna_op1db[gband][path][i] = data & 0xff;
1556 		break;
1557 	case 1:
1558 		for (i = 4; i < 7; i++, data >>= 8)
1559 			gain->lna_op1db[gband][path][i] = data & 0xff;
1560 		break;
1561 	case 2:
1562 		for (i = 0; i < 4; i++, data >>= 8)
1563 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1564 		break;
1565 	case 3:
1566 		for (i = 4; i < 8; i++, data >>= 8)
1567 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1568 		break;
1569 	default:
1570 		rtw89_warn(rtwdev,
1571 			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
1572 			   arg.addr, data, type);
1573 		break;
1574 	}
1575 }
1576 
/* Dispatch one bb-gain table entry to the parser matching its cfg_type.
 * The entry's addr field is packed metadata (union rtw89_phy_bb_gain_arg),
 * not a register address.
 */
static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	/* skip entries for bands/paths this chip does not have */
	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	/* delay pseudo-addresses are not expected in a gain table */
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
1622 
1623 static void
1624 rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
1625 			     const struct rtw89_reg2_def *reg,
1626 			     enum rtw89_rf_path rf_path,
1627 			     struct rtw89_fw_h2c_rf_reg_info *info)
1628 {
1629 	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
1630 	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
1631 
1632 	if (page >= RTW89_H2C_RF_PAGE_NUM) {
1633 		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
1634 			   rf_path, info->curr_idx);
1635 		return;
1636 	}
1637 
1638 	info->rtw89_phy_config_rf_h2c[page][idx] =
1639 		cpu_to_le32((reg->addr << 20) | reg->data);
1640 	info->curr_idx++;
1641 }
1642 
1643 static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
1644 				      struct rtw89_fw_h2c_rf_reg_info *info)
1645 {
1646 	u16 remain = info->curr_idx;
1647 	u16 len = 0;
1648 	u8 i;
1649 	int ret = 0;
1650 
1651 	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
1652 		rtw89_warn(rtwdev,
1653 			   "rf reg h2c total len %d larger than %d\n",
1654 			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
1655 		ret = -EINVAL;
1656 		goto out;
1657 	}
1658 
1659 	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
1660 		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
1661 		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
1662 		if (ret)
1663 			goto out;
1664 	}
1665 out:
1666 	info->curr_idx = 0;
1667 
1668 	return ret;
1669 }
1670 
1671 static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
1672 					 const struct rtw89_reg2_def *reg,
1673 					 enum rtw89_rf_path rf_path,
1674 					 void *extra_data)
1675 {
1676 	u32 addr = reg->addr;
1677 
1678 	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
1679 	    addr == 0xfa || addr == 0xf9)
1680 		return;
1681 
1682 	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
1683 		return;
1684 
1685 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1686 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1687 }
1688 
1689 void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
1690 			     const struct rtw89_reg2_def *reg,
1691 			     enum rtw89_rf_path rf_path,
1692 			     void *extra_data)
1693 {
1694 	if (reg->addr == 0xfe) {
1695 		mdelay(50);
1696 	} else if (reg->addr == 0xfd) {
1697 		mdelay(5);
1698 	} else if (reg->addr == 0xfc) {
1699 		mdelay(1);
1700 	} else if (reg->addr == 0xfb) {
1701 		udelay(50);
1702 	} else if (reg->addr == 0xfa) {
1703 		udelay(5);
1704 	} else if (reg->addr == 0xf9) {
1705 		udelay(1);
1706 	} else {
1707 		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
1708 		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1709 					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1710 	}
1711 }
1712 
1713 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
1714 				const struct rtw89_reg2_def *reg,
1715 				enum rtw89_rf_path rf_path,
1716 				void *extra_data)
1717 {
1718 	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
1719 
1720 	if (reg->addr < 0x100)
1721 		return;
1722 
1723 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1724 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1725 }
1726 EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
1727 
/* Select the headline (parameter-set selector) entry of a PHY table.
 *
 * A table may start with "headline" rows that bind a parameter set to an
 * (RFE type, chip cut version) pair. Matching preference:
 *   1. exact RFE and CV match
 *   2. exact RFE, don't-care CV
 *   3. exact RFE, highest CV present in the table
 *   4. don't-care RFE, highest CV present in the table
 *
 * On success *headline_size and *headline_idx are filled and 0 is
 * returned; -EINVAL means no applicable headline was found.
 */
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	/* headlines form a contiguous prefix of the table */
	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}
1809 
/* Walk a PHY parameter table and apply its entries through @config.
 *
 * Entries may be wrapped in IF/ELIF/ELSE/END conditional pseudo-ops
 * whose targets are compared against the headline selected for this
 * chip's RFE type and cut version (@by_acv selects hal.acv over
 * hal.cv). Only entries inside a matched branch (or outside any
 * branch) are applied.
 */
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table, bool by_acv,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 cv = by_acv ? rtwdev->hal.acv : rtwdev->hal.cv;
	u8 rfe = rtwdev->efuse.rfe_type;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	/* branch targets are checked against the selected headline */
	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			/* remember the target; the CHECK op decides matching */
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			/* only the first matching branch is applied */
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
1879 
1880 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
1881 {
1882 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1883 	const struct rtw89_chip_info *chip = rtwdev->chip;
1884 	const struct rtw89_phy_table *bb_table;
1885 	const struct rtw89_phy_table *bb_gain_table;
1886 
1887 	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
1888 	rtw89_phy_init_reg(rtwdev, bb_table, false, rtw89_phy_config_bb_reg, NULL);
1889 	if (rtwdev->dbcc_en)
1890 		rtw89_phy_init_reg(rtwdev, bb_table, false, rtw89_phy_config_bb_reg,
1891 				   (void *)RTW89_PHY_1);
1892 
1893 	rtw89_chip_init_txpwr_unit(rtwdev);
1894 
1895 	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
1896 	if (bb_gain_table)
1897 		rtw89_phy_init_reg(rtwdev, bb_gain_table, false,
1898 				   chip->phy_def->config_bb_gain, NULL);
1899 
1900 	rtw89_phy_bb_reset(rtwdev);
1901 }
1902 
1903 void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev)
1904 {
1905 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1906 	const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe;
1907 	const struct rtw89_phy_afe_info *info;
1908 	u32 action, cat, class;
1909 	u32 addr, mask, val;
1910 	u32 poll, rpt;
1911 	u32 n, i;
1912 
1913 	if (!afe_elm)
1914 		return;
1915 
1916 	n = le32_to_cpu(afe_elm->size) / sizeof(*info);
1917 
1918 	for (i = 0; i < n; i++) {
1919 		info = &afe_elm->u.afe.infos[i];
1920 
1921 		class = le32_to_cpu(info->class);
1922 		switch (class) {
1923 		case RTW89_FW_AFE_CLASS_P0:
1924 		case RTW89_FW_AFE_CLASS_P1:
1925 		case RTW89_FW_AFE_CLASS_CMN:
1926 			/* Currently support two paths */
1927 			break;
1928 		case RTW89_FW_AFE_CLASS_P2:
1929 		case RTW89_FW_AFE_CLASS_P3:
1930 		case RTW89_FW_AFE_CLASS_P4:
1931 		default:
1932 			rtw89_warn(rtwdev, "unexpected AFE class %u\n", class);
1933 			continue;
1934 		}
1935 
1936 		addr = le32_to_cpu(info->addr);
1937 		mask = le32_to_cpu(info->mask);
1938 		val = le32_to_cpu(info->val);
1939 		cat = le32_to_cpu(info->cat);
1940 		action = le32_to_cpu(info->action);
1941 
1942 		switch (action) {
1943 		case RTW89_FW_AFE_ACTION_WRITE:
1944 			switch (cat) {
1945 			case RTW89_FW_AFE_CAT_MAC:
1946 			case RTW89_FW_AFE_CAT_MAC1:
1947 				rtw89_write32_mask(rtwdev, addr, mask, val);
1948 				break;
1949 			case RTW89_FW_AFE_CAT_AFEDIG:
1950 			case RTW89_FW_AFE_CAT_AFEDIG1:
1951 				rtw89_write32_mask(rtwdev, addr, mask, val);
1952 				break;
1953 			case RTW89_FW_AFE_CAT_BB:
1954 				rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
1955 				break;
1956 			case RTW89_FW_AFE_CAT_BB1:
1957 				rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
1958 				break;
1959 			default:
1960 				rtw89_warn(rtwdev,
1961 					   "unexpected AFE writing action %u\n", action);
1962 				break;
1963 			}
1964 			break;
1965 		case RTW89_FW_AFE_ACTION_POLL:
1966 			for (poll = 0; poll <= 10; poll++) {
1967 				/*
1968 				 * For CAT_BB, AFE reads register with mcu_offset 0,
1969 				 * so both CAT_MAC and CAT_BB use the same method.
1970 				 */
1971 				rpt = rtw89_read32_mask(rtwdev, addr, mask);
1972 				if (rpt == val)
1973 					goto poll_done;
1974 
1975 				fsleep(1);
1976 			}
1977 			rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n",
1978 				   cat, addr, mask);
1979 poll_done:
1980 			break;
1981 		case RTW89_FW_AFE_ACTION_DELAY:
1982 			fsleep(addr);
1983 			break;
1984 		}
1985 	}
1986 }
1987 
/* Kick the NCTL block (write 0x4 to 0x8080) and read back its status
 * word; used as the poll callback for NCTL preinit.
 */
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}
1994 
/* Load the per-path RF radio tables. With @noio the entries are only
 * buffered (no register writes); otherwise they are written to hardware
 * and buffered. The buffered entries are then pushed to firmware via
 * H2C, one path at a time.
 */
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *rf_table;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	bool by_acv = chip->chip_id == RTL8922D;
	u8 path;

	rf_reg_info = kzalloc_obj(*rf_reg_info);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		/* prefer firmware-embedded radio tables over built-ins */
		rf_table = elm_info->rf_radio[path] ?
			   elm_info->rf_radio[path] : chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;
		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ? rf_table->config :
				 rtw89_phy_config_rf_reg;
		rtw89_phy_init_reg(rtwdev, rf_table, by_acv, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}
2026 
/* Pre-initialize the RF NCTL (IQK/DPK) block on AX-gen chips: enable its
 * clocks, release the per-path resets, then kick the block and poll 0x8080
 * until it reports the ready value 0x4.
 */
static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	/* RTL8851B skips the path-1 reset release entirely */
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	/* extra IOQ bit required only on 8852B/8852BT */
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	/* poll every 10us, up to 1ms total, via rtw89_phy_nctl_poll() */
	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}
2050 
2051 static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
2052 {
2053 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
2054 	const struct rtw89_chip_info *chip = rtwdev->chip;
2055 	const struct rtw89_phy_table *nctl_table;
2056 
2057 	rtw89_phy_preinit_rf_nctl(rtwdev);
2058 
2059 	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
2060 	rtw89_phy_init_reg(rtwdev, nctl_table, false, rtw89_phy_config_bb_reg, NULL);
2061 
2062 	if (chip->nctl_post_table)
2063 		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
2064 }
2065 
2066 static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
2067 {
2068 	u32 phy_page = addr >> 8;
2069 	u32 ofst = 0;
2070 
2071 	switch (phy_page) {
2072 	case 0x6:
2073 	case 0x7:
2074 	case 0x8:
2075 	case 0x9:
2076 	case 0xa:
2077 	case 0xb:
2078 	case 0xc:
2079 	case 0xd:
2080 	case 0x19:
2081 	case 0x1a:
2082 	case 0x1b:
2083 		ofst = 0x2000;
2084 		break;
2085 	default:
2086 		/* warning case */
2087 		ofst = 0;
2088 		break;
2089 	}
2090 
2091 	if (phy_page >= 0x40 && phy_page <= 0x4f)
2092 		ofst = 0x2000;
2093 
2094 	return ofst;
2095 }
2096 
2097 void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
2098 			   u32 data, enum rtw89_phy_idx phy_idx)
2099 {
2100 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
2101 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
2102 	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
2103 }
2104 EXPORT_SYMBOL(rtw89_phy_write32_idx);
2105 
2106 void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
2107 			       enum rtw89_phy_idx phy_idx)
2108 {
2109 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
2110 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
2111 	rtw89_phy_write32_set(rtwdev, addr, bits);
2112 }
2113 EXPORT_SYMBOL(rtw89_phy_write32_idx_set);
2114 
2115 void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
2116 			       enum rtw89_phy_idx phy_idx)
2117 {
2118 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
2119 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
2120 	rtw89_phy_write32_clr(rtwdev, addr, bits);
2121 }
2122 EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);
2123 
2124 u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
2125 			 enum rtw89_phy_idx phy_idx)
2126 {
2127 	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
2128 		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
2129 	return rtw89_phy_read32_mask(rtwdev, addr, mask);
2130 }
2131 EXPORT_SYMBOL(rtw89_phy_read32_idx);
2132 
2133 void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
2134 			    u32 val)
2135 {
2136 	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
2137 
2138 	if (!rtwdev->dbcc_en)
2139 		return;
2140 
2141 	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
2142 }
2143 EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
2144 
2145 void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
2146 			      const struct rtw89_phy_reg3_tbl *tbl)
2147 {
2148 	const struct rtw89_reg3_def *reg3;
2149 	int i;
2150 
2151 	for (i = 0; i < tbl->size; i++) {
2152 		reg3 = &tbl->reg3[i];
2153 		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
2154 	}
2155 }
2156 EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
2157 
2158 static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
2159 {
2160 	switch (ant_gain_regd) {
2161 	case RTW89_ANT_GAIN_ETSI:
2162 		return RTW89_ETSI;
2163 	default:
2164 		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2165 			    "unknown antenna gain domain: %d\n",
2166 			    ant_gain_regd);
2167 		return RTW89_REGD_NUM;
2168 	}
2169 }
2170 
2171 /* antenna gain in unit of 0.25 dbm */
2172 #define RTW89_ANT_GAIN_2GHZ_MIN -8
2173 #define RTW89_ANT_GAIN_2GHZ_MAX 14
2174 #define RTW89_ANT_GAIN_5GHZ_MIN -8
2175 #define RTW89_ANT_GAIN_5GHZ_MAX 20
2176 #define RTW89_ANT_GAIN_6GHZ_MIN -8
2177 #define RTW89_ANT_GAIN_6GHZ_MAX 20
2178 
2179 #define RTW89_ANT_GAIN_REF_2GHZ 14
2180 #define RTW89_ANT_GAIN_REF_5GHZ 20
2181 #define RTW89_ANT_GAIN_REF_6GHZ 20
2182 
/* Initialize dynamic antenna gain (DAG) state from the ACPI RTAG method.
 *
 * Records which regulatory domains opted in (RTAG domain bitmap) and
 * converts each per-chain, per-subband reported gain into an offset of
 * "band reference gain minus clamped reported gain", stored in
 * ant_gain->offset[][] (unit of 0.25 dBm per the constants above).
 */
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_acpi_rtag_result res = {};
	u32 domain;
	int ret;
	u8 i, j;
	u8 regd;
	u8 val;

	if (!chip->support_ant_gain)
		return;

	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "acpi: cannot eval rtag: %d\n", ret);
		return;
	}

	/* only revision 0 of the RTAG layout is understood */
	if (res.revision != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown rtag revision: %d\n", res.revision);
		return;
	}

	domain = get_unaligned_le32(&res.domain);

	/* translate the RTAG domain bitmap into a regd-indexed bitmap */
	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
		if (!(domain & BIT(i)))
			continue;

		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
		if (regd >= RTW89_REGD_NUM)
			continue;
		ant_gain->regd_enabled |= BIT(regd);
	}

	/* offset = band reference gain - clamped reported gain */
	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
			val = res.ant_gain_table[i][j];
			switch (j) {
			default:
			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
				val = RTW89_ANT_GAIN_REF_2GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_2GHZ_MIN,
					      RTW89_ANT_GAIN_2GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
				val = RTW89_ANT_GAIN_REF_5GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_5GHZ_MIN,
					      RTW89_ANT_GAIN_5GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
				val = RTW89_ANT_GAIN_REF_6GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_6GHZ_MIN,
					      RTW89_ANT_GAIN_6GHZ_MAX);
			}
			ant_gain->offset[i][j] = val;
		}
	}
}
2257 
2258 static
2259 enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
2260 							   u32 center_freq)
2261 {
2262 	switch (center_freq) {
2263 	default:
2264 		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2265 			    "center freq: %u to antenna gain subband is unhandled\n",
2266 			    center_freq);
2267 		fallthrough;
2268 	case 2412 ... 2484:
2269 		return RTW89_ANT_GAIN_2GHZ_SUBBAND;
2270 	case 5180 ... 5240:
2271 		return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
2272 	case 5250 ... 5320:
2273 		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
2274 	case 5500 ... 5720:
2275 		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
2276 	case 5745 ... 5885:
2277 		return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
2278 	case 5955 ... 6155:
2279 		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
2280 	case 6175 ... 6415:
2281 		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
2282 	case 6435 ... 6515:
2283 		return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
2284 	case 6535 ... 6695:
2285 		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
2286 	case 6715 ... 6855:
2287 		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;
2288 
2289 	/* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
2290 	 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
2291 	 * struct rtw89_6ghz_span.
2292 	 */
2293 
2294 	case 6895 ... 7115:
2295 		return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
2296 	}
2297 }
2298 
2299 static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
2300 				   enum rtw89_rf_path path, u32 center_freq)
2301 {
2302 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2303 	enum rtw89_ant_gain_subband subband_l, subband_h;
2304 	const struct rtw89_6ghz_span *span;
2305 
2306 	span = rtw89_get_6ghz_span(rtwdev, center_freq);
2307 
2308 	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
2309 		subband_l = span->ant_gain_subband_low;
2310 		subband_h = span->ant_gain_subband_high;
2311 	} else {
2312 		subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
2313 		subband_h = subband_l;
2314 	}
2315 
2316 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2317 		    "center_freq %u: antenna gain subband {%u, %u}\n",
2318 		    center_freq, subband_l, subband_h);
2319 
2320 	return min(ant_gain->offset[path][subband_l],
2321 		   ant_gain->offset[path][subband_h]);
2322 }
2323 
2324 static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq)
2325 {
2326 	s8 offset_patha, offset_pathb;
2327 
2328 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
2329 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
2330 
2331 	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
2332 		return min(offset_patha, offset_pathb);
2333 
2334 	return max(offset_patha, offset_pathb);
2335 }
2336 
2337 static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band)
2338 {
2339 	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
2340 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2341 	const struct rtw89_chip_info *chip = rtwdev->chip;
2342 	u8 regd = rtw89_regd_get(rtwdev, band);
2343 
2344 	if (!chip->support_ant_gain)
2345 		return false;
2346 
2347 	if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
2348 		return false;
2349 
2350 	if (!rfe_parms->has_da)
2351 		return false;
2352 
2353 	return true;
2354 }
2355 
2356 s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
2357 				  const struct rtw89_chan *chan)
2358 {
2359 	s8 offset_patha, offset_pathb;
2360 
2361 	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type))
2362 		return 0;
2363 
2364 	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
2365 		return 0;
2366 
2367 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
2368 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
2369 
2370 	return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
2371 }
2372 EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
2373 
2374 int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
2375 			 const struct rtw89_chan *chan)
2376 {
2377 	char *p = buf, *end = buf + bufsz;
2378 	s8 offset_patha, offset_pathb;
2379 
2380 	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) {
2381 		p += scnprintf(p, end - p, "no DAG is applied\n");
2382 		goto out;
2383 	}
2384 
2385 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
2386 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
2387 
2388 	p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", offset_patha);
2389 	p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", offset_pathb);
2390 
2391 out:
2392 	return p - buf;
2393 }
2394 
/* Number of rate indexes within each rate section on AX-gen chips */
static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};
2402 
/* Number of spatial-stream entries within each rate section on AX-gen chips */
static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};
2410 
/* Return a pointer into @head's storage for the rate described by @desc.
 *
 * Unknown rate sections are reported and redirected to the scratch
 * &head->trap slot so callers always get a valid, writable location.
 */
s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
			   struct rtw89_txpwr_byrate *head,
			   const struct rtw89_rate_desc *desc)
{
	switch (desc->rs) {
	case RTW89_RS_CCK:
		return &head->cck[desc->idx];
	case RTW89_RS_OFDM:
		return &head->ofdm[desc->idx];
	case RTW89_RS_MCS:
		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_HEDCM:
		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_OFFSET:
		return &head->offset[desc->idx];
	default:
		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
		return &head->trap;
	}
}
2431 
/* Unpack a TX-power-by-rate config table into rtwdev->byr.
 *
 * Each cfg entry carries cfg->len consecutive 8-bit power values packed
 * into the 32-bit cfg->data (lowest byte first), starting at rate index
 * cfg->shf within the rate section cfg->rs.
 */
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				 const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	s8 *byr;
	u32 data;
	u8 i;

	for (; cfg < end; cfg++) {
		/* tables are loaded into the first bandwidth slot per band */
		byr_head = &rtwdev->byr[cfg->band][0];
		desc.rs = cfg->rs;
		desc.nss = cfg->nss;
		data = cfg->data;

		/* peel one byte per rate index */
		for (i = 0; i < cfg->len; i++, data >>= 8) {
			desc.idx = cfg->shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}
2457 
2458 static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
2459 {
2460 	const u8 tssi_deviation_point = 0;
2461 	const u8 tssi_max_deviation = 2;
2462 
2463 	if (dbm <= tssi_deviation_point)
2464 		dbm -= tssi_max_deviation;
2465 
2466 	return dbm;
2467 }
2468 
2469 static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
2470 {
2471 	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
2472 	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
2473 	s8 cstr = S8_MAX;
2474 
2475 	if (band == RTW89_BAND_6G && tpe->valid)
2476 		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);
2477 
2478 	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
2479 }
2480 
2481 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
2482 			       const struct rtw89_rate_desc *rate_desc)
2483 {
2484 	struct rtw89_txpwr_byrate *byr_head;
2485 	s8 *byr;
2486 
2487 	if (rate_desc->rs == RTW89_RS_CCK)
2488 		band = RTW89_BAND_2G;
2489 
2490 	byr_head = &rtwdev->byr[band][bw];
2491 	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);
2492 
2493 	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
2494 }
2495 
2496 static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
2497 {
2498 	switch (channel_6g) {
2499 	case 1 ... 29:
2500 		return (channel_6g - 1) / 2;
2501 	case 33 ... 61:
2502 		return (channel_6g - 3) / 2;
2503 	case 65 ... 93:
2504 		return (channel_6g - 5) / 2;
2505 	case 97 ... 125:
2506 		return (channel_6g - 7) / 2;
2507 	case 129 ... 157:
2508 		return (channel_6g - 9) / 2;
2509 	case 161 ... 189:
2510 		return (channel_6g - 11) / 2;
2511 	case 193 ... 221:
2512 		return (channel_6g - 13) / 2;
2513 	case 225 ... 253:
2514 		return (channel_6g - 15) / 2;
2515 	default:
2516 		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
2517 		return 0;
2518 	}
2519 }
2520 
2521 static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
2522 {
2523 	if (band == RTW89_BAND_6G)
2524 		return rtw89_channel_6g_to_idx(rtwdev, channel);
2525 
2526 	switch (channel) {
2527 	case 1 ... 14:
2528 		return channel - 1;
2529 	case 36 ... 64:
2530 		return (channel - 36) / 2;
2531 	case 100 ... 144:
2532 		return ((channel - 100) / 2) + 15;
2533 	case 149 ... 177:
2534 		return ((channel - 149) / 2) + 38;
2535 	default:
2536 		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
2537 		return 0;
2538 	}
2539 }
2540 
2541 static bool rtw89_phy_validate_txpwr_limit_bw(struct rtw89_dev *rtwdev,
2542 					      u8 band, u8 bw)
2543 {
2544 	switch (band) {
2545 	case RTW89_BAND_2G:
2546 		return bw < RTW89_2G_BW_NUM;
2547 	case RTW89_BAND_5G:
2548 		return bw < RTW89_5G_BW_NUM;
2549 	case RTW89_BAND_6G:
2550 		return bw < RTW89_6G_BW_NUM;
2551 	default:
2552 		return false;
2553 	}
2554 }
2555 
/* Look up the TX power limit (MAC unit) for one rate section on one
 * channel. The per-regd limit (falling back to the worldwide entry when
 * zero), the dynamic-antenna-gain (DA) limit plus gain offset, SAR and
 * the 6 GHz TPE constraint are all consulted; the smallest value wins.
 */
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	if (!rtw89_phy_validate_txpwr_limit_bw(rtwdev, band, bw)) {
		rtw89_warn(rtwdev, "invalid band %u bandwidth %u\n", band, bw);
		return 0;
	}

	/* per-regd limit first; a zero entry falls back to worldwide (WW) */
	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		/* 6 GHz tables are additionally indexed by the 6 GHz power mode */
		if (has_ant_gain)
			da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* a zero DA entry means "no DA limit"; apply gain offset only
	 * when a real DA limit exists
	 */
	da_lmt = da_lmt ?: S8_MAX;
	if (da_lmt != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt));
	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
2634 
/* Read the limit for every beamforming case (bf = 0 .. RTW89_BF_NUM - 1)
 * of one rate section/channel into the RTW89_BF_NUM-sized array at ptr.
 */
#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)
2645 
/* Fill TX power limits for a 20 MHz channel: CCK (20M and 40M shapes),
 * OFDM and the single MCS 20 MHz slot, all centered on @ch.
 */
static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}
2660 
/* Fill TX power limits for a 40 MHz channel. CCK 20M uses the lower
 * 20 MHz half (ch - 2), OFDM uses the primary channel, and MCS 20 MHz
 * entries cover both halves (ch -/+ 2).
 */
static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}
2681 
/* Fill TX power limits for an 80 MHz channel: OFDM on the primary
 * channel, MCS limits for all four 20 MHz quarters and both 40 MHz
 * halves plus the full 80 MHz, and the 0p5-offset 40 MHz case as the
 * min across the two symmetric 40 MHz halves.
 */
static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	u8 i;

	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch);

	/* 40 MHz 0p5-offset case: conservative min of the two halves */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
2722 
/* Fill TX power limits for a 160 MHz channel: OFDM on the primary
 * channel; MCS limits for all eight 20 MHz, four 40 MHz and two 80 MHz
 * sub-channels plus the full 160 MHz; and the 0p5/2p5-offset 40 MHz
 * cases as the min of their two symmetric sub-channels.
 */
static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev,
					       struct rtw89_txpwr_limit_ax *lmt,
					       u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	s8 val_2p5_n[RTW89_BF_NUM];
	s8 val_2p5_p[RTW89_BF_NUM];
	u8 i;

	/* fill ofdm section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* fill mcs 20m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 14);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 14);

	/* fill mcs 40m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 12);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 12);

	/* fill mcs 80m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch + 8);

	/* fill mcs 160m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* fill mcs 40m 0p5 section */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

	/* fill mcs 40m 2p5 section */
	__fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}
2808 
2809 static
2810 void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev,
2811 				   const struct rtw89_chan *chan,
2812 				   struct rtw89_txpwr_limit_ax *lmt,
2813 				   u8 ntx)
2814 {
2815 	u8 band = chan->band_type;
2816 	u8 pri_ch = chan->primary_channel;
2817 	u8 ch = chan->channel;
2818 	u8 bw = chan->band_width;
2819 
2820 	memset(lmt, 0, sizeof(*lmt));
2821 
2822 	switch (bw) {
2823 	case RTW89_CHANNEL_WIDTH_20:
2824 		rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch);
2825 		break;
2826 	case RTW89_CHANNEL_WIDTH_40:
2827 		rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch,
2828 						  pri_ch);
2829 		break;
2830 	case RTW89_CHANNEL_WIDTH_80:
2831 		rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch,
2832 						  pri_ch);
2833 		break;
2834 	case RTW89_CHANNEL_WIDTH_160:
2835 		rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch,
2836 						   pri_ch);
2837 		break;
2838 	}
2839 }
2840 
/* Look up the RU (OFDMA) TX power limit (MAC unit) for one RU size on
 * one channel. Mirrors rtw89_phy_read_txpwr_limit(): per-regd limit
 * with worldwide fallback, DA limit plus antenna gain offset, SAR and
 * the 6 GHz TPE constraint; the smallest value wins.
 */
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
				 u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt_ru = 0, da_lmt_ru = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	/* per-regd limit first; a zero entry falls back to worldwide (WW) */
	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		/* 6 GHz tables are additionally indexed by the 6 GHz power mode */
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* a zero DA entry means "no DA limit"; apply gain offset only
	 * when a real DA limit exists
	 */
	da_lmt_ru = da_lmt_ru ?: S8_MAX;
	if (da_lmt_ru != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt_ru + offset, da_lmt_ru));
	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt_ru, sar, cstr);
}
2913 
/* 20 MHz channel: a single 20 MHz section at @ch provides the RU26,
 * RU52 and RU106 limits.
 */
static void
rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch);
}
2929 
2930 static void
2931 rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev,
2932 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2933 				     u8 band, u8 ntx, u8 ch)
2934 {
2935 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2936 							RTW89_RU26,
2937 							ntx, ch - 2);
2938 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2939 							RTW89_RU26,
2940 							ntx, ch + 2);
2941 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2942 							RTW89_RU52,
2943 							ntx, ch - 2);
2944 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2945 							RTW89_RU52,
2946 							ntx, ch + 2);
2947 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2948 							 RTW89_RU106,
2949 							 ntx, ch - 2);
2950 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2951 							 RTW89_RU106,
2952 							 ntx, ch + 2);
2953 }
2954 
2955 static void
2956 rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev,
2957 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2958 				     u8 band, u8 ntx, u8 ch)
2959 {
2960 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2961 							RTW89_RU26,
2962 							ntx, ch - 6);
2963 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2964 							RTW89_RU26,
2965 							ntx, ch - 2);
2966 	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2967 							RTW89_RU26,
2968 							ntx, ch + 2);
2969 	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2970 							RTW89_RU26,
2971 							ntx, ch + 6);
2972 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2973 							RTW89_RU52,
2974 							ntx, ch - 6);
2975 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2976 							RTW89_RU52,
2977 							ntx, ch - 2);
2978 	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2979 							RTW89_RU52,
2980 							ntx, ch + 2);
2981 	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2982 							RTW89_RU52,
2983 							ntx, ch + 6);
2984 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2985 							 RTW89_RU106,
2986 							 ntx, ch - 6);
2987 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2988 							 RTW89_RU106,
2989 							 ntx, ch - 2);
2990 	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2991 							 RTW89_RU106,
2992 							 ntx, ch + 2);
2993 	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2994 							 RTW89_RU106,
2995 							 ntx, ch + 6);
2996 }
2997 
2998 static void
2999 rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev,
3000 				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
3001 				      u8 band, u8 ntx, u8 ch)
3002 {
3003 	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
3004 	int i;
3005 
3006 	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
3007 	for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) {
3008 		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
3009 								RTW89_RU26,
3010 								ntx,
3011 								ch + ofst[i]);
3012 		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
3013 								RTW89_RU52,
3014 								ntx,
3015 								ch + ofst[i]);
3016 		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
3017 								 RTW89_RU106,
3018 								 ntx,
3019 								 ch + ofst[i]);
3020 	}
3021 }
3022 
/* Fill the per-RU TX power limit page for one TX path (ntx) according to
 * the current channel's bandwidth. Bandwidths without a case below leave
 * the page all-zero (cleared by the memset).
 */
static
void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
				      const struct rtw89_chan *chan,
				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				      u8 ntx)
{
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	/* unused RU sections (narrower bandwidths) stay zero */
	memset(lmt_ru, 0, sizeof(*lmt_ru));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx,
						      ch);
		break;
	}
}
3054 
/* Program the by-rate TX power table: for each NSS and rate section, read
 * the per-rate power values and write them to consecutive R_AX_PWR_BY_RATE
 * registers, four s8 values packed per 32-bit word.
 */
static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur = {};
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	/* packing below assumes each section's index count is a multiple of 4 */
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			/* skip rate sections that don't support this NSS */
			if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band, 0,
								    &cur);

				/* flush only once four values are collected */
				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
3110 
3111 static
3112 void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev,
3113 				   const struct rtw89_chan *chan,
3114 				   enum rtw89_phy_idx phy_idx)
3115 {
3116 	struct rtw89_rate_desc desc = {
3117 		.nss = RTW89_NSS_1,
3118 		.rs = RTW89_RS_OFFSET,
3119 	};
3120 	u8 band = chan->band_type;
3121 	s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
3122 	u32 val;
3123 
3124 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
3125 
3126 	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
3127 		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);
3128 
3129 	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
3130 	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
3131 	      FIELD_PREP(GENMASK(7, 4), v[1]) |
3132 	      FIELD_PREP(GENMASK(11, 8), v[2]) |
3133 	      FIELD_PREP(GENMASK(15, 12), v[3]) |
3134 	      FIELD_PREP(GENMASK(19, 16), v[4]);
3135 
3136 	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
3137 				     GENMASK(19, 0), val);
3138 }
3139 
3140 static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev,
3141 					 const struct rtw89_chan *chan,
3142 					 enum rtw89_phy_idx phy_idx)
3143 {
3144 	u8 max_ntx_num = rtwdev->chip->rf_path_num;
3145 	struct rtw89_txpwr_limit_ax lmt;
3146 	u8 ch = chan->channel;
3147 	u8 bw = chan->band_width;
3148 	const s8 *ptr;
3149 	u32 addr, val;
3150 	u8 i, j;
3151 
3152 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
3153 		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
3154 
3155 	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) !=
3156 		     RTW89_TXPWR_LMT_PAGE_SIZE_AX);
3157 
3158 	addr = R_AX_PWR_LMT;
3159 	for (i = 0; i < max_ntx_num; i++) {
3160 		rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i);
3161 
3162 		ptr = (s8 *)&lmt;
3163 		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX;
3164 		     j += 4, addr += 4, ptr += 4) {
3165 			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
3166 			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
3167 			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
3168 			      FIELD_PREP(GENMASK(31, 24), ptr[3]);
3169 
3170 			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
3171 		}
3172 	}
3173 }
3174 
3175 static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
3176 					    const struct rtw89_chan *chan,
3177 					    enum rtw89_phy_idx phy_idx)
3178 {
3179 	u8 max_ntx_num = rtwdev->chip->rf_path_num;
3180 	struct rtw89_txpwr_limit_ru_ax lmt_ru;
3181 	u8 ch = chan->channel;
3182 	u8 bw = chan->band_width;
3183 	const s8 *ptr;
3184 	u32 addr, val;
3185 	u8 i, j;
3186 
3187 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
3188 		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
3189 
3190 	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) !=
3191 		     RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX);
3192 
3193 	addr = R_AX_PWR_RU_LMT;
3194 	for (i = 0; i < max_ntx_num; i++) {
3195 		rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i);
3196 
3197 		ptr = (s8 *)&lmt_ru;
3198 		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX;
3199 		     j += 4, addr += 4, ptr += 4) {
3200 			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
3201 			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
3202 			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
3203 			      FIELD_PREP(GENMASK(31, 24), ptr[3]);
3204 
3205 			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
3206 		}
3207 	}
3208 }
3209 
/* Context handed to the station iterator while processing an RA report. */
struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;	/* owning device */
	struct sk_buff *c2h;		/* C2H RA report frame being parsed */
};
3214 
/* Apply one firmware RA (rate adaptation) report to a single station link:
 * decode the reported rate/bandwidth/GI into ra_report->txrate, derive the
 * matching A-MSDU length limit, and flag *changed when that limit differs
 * from the current one. Links whose mac_id doesn't match the report are
 * ignored.
 */
static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
					struct ieee80211_link_sta *link_sta,
					struct rtw89_phy_iter_ra_data *ra_data,
					bool *changed)
{
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate, amsdu_len;
	bool valid;
	u8 mcs = 0;
	u8 t;

	/* report is addressed to exactly one link by MAC ID */
	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta_link->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	/* BE-generation report widens rate/bw/mode by one extra bit each */
	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	/* validate the legacy rate index before touching ra_report */
	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		/* per-stream MCS (0-7) for the fallback heuristic below */
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss  = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		/* map firmware GI/LTF code to nl80211 HE guard interval */
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_EHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
		ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1);
		ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
		else
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	/* low MCS suggests the rate may fall back to legacy rates */
	ra_report->might_fallback_legacy = mcs <= 2;

	amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	if (link_sta->agg.max_rc_amsdu_len != amsdu_len) {
		link_sta->agg.max_rc_amsdu_len = amsdu_len;
		*changed = true;
	}

	rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1;
}
3333 
/* Per-station callback for ieee80211_iterate_stations_atomic(): feed the RA
 * report to every active link of the station and, if any link's A-MSDU
 * limit changed, ask mac80211 to recompute the station aggregates.
 */
static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;
	bool changed = false;

	/* link_sta pointers are RCU-protected */
	rcu_read_lock();

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
		__rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data, &changed);
	}

	if (changed)
		ieee80211_sta_recalc_aggregates(sta);

	rcu_read_unlock();
}
3355 
3356 static void
3357 rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
3358 {
3359 	struct rtw89_phy_iter_ra_data ra_data;
3360 
3361 	ra_data.rtwdev = rtwdev;
3362 	ra_data.c2h = c2h;
3363 	ieee80211_iterate_stations_atomic(rtwdev->hw,
3364 					  rtw89_phy_c2h_ra_rpt_iter,
3365 					  &ra_data);
3366 }
3367 
/* Dispatch table for RA-class C2H events, indexed by function code.
 * NULL entries are unsupported; dummy handler entries are acknowledged
 * but intentionally ignored.
 */
static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
	[RTW89_PHY_C2H_FUNC_ACCELERATE_EN] = rtw89_fw_c2h_dummy_handler,
};
3376 
/* Intentionally-empty handler: the low-rate-retry event only needs to be
 * consumed so it isn't reported as unhandled.
 */
static void
rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
3381 
/* Parse and debug-dump an LPS (low power state) report: a variable-length
 * info blob followed by three register-dump sections (BBCR, BBMCUCR, RFCR),
 * each a list of 16-bit addresses followed by 32-bit values (RFCR carries
 * two values per address). Purely diagnostic; only emits debug logs.
 */
static void
rtw89_phy_c2h_lps_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_lps_rpt *c2h_rpt = (const void *)c2h->data;
	const __le32 *data_a, *data_b;
	u16 len_info, cr_len, idx;
	const __le16 *addr;
	const u8 *info;

	/* elements size of BBCR/BBMCUCR/RFCR are 6/6/10 bytes respectively */
	cr_len = c2h_rpt->cnt_bbcr * 6 +
		 c2h_rpt->cnt_bbmcucr * 6 +
		 c2h_rpt->cnt_rfcr * 10;
	len_info = len - (sizeof(*c2h_rpt) + cr_len);

	/* reject short frames and info sections not word-aligned;
	 * the underflow of len_info above is harmless since len is
	 * validated here before len_info is used
	 */
	if (len < sizeof(*c2h_rpt) + cr_len || len_info % 4 != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "Invalid LPS RPT len(%d) TYPE(%d) CRCNT: BB(%d) MCU(%d) RF(%d)\n",
			    len, c2h_rpt->type, c2h_rpt->cnt_bbcr,
			    c2h_rpt->cnt_bbmcucr, c2h_rpt->cnt_rfcr);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PS,
		    "LPS RPT TYPE(%d), CRCNT: BB(%d) MCU(%d) RF(%d)\n",
		    c2h_rpt->type, c2h_rpt->cnt_bbcr,
		    c2h_rpt->cnt_bbmcucr, c2h_rpt->cnt_rfcr);

	/* raw BB info words precede the register sections */
	info = &c2h_rpt->data[0];
	for (idx = 0; idx < len_info; idx += 4, info += 4)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "BB LPS INFO (%02d) - 0x%02x,0x%02x,0x%02x,0x%02x\n",
			    idx, info[3], info[2], info[1], info[0]);

	/* BBCR section: cnt_bbcr addresses then cnt_bbcr values */
	addr = (const void *)(info);
	data_a = (const void *)(addr + c2h_rpt->cnt_bbcr);
	for (idx = 0; idx < c2h_rpt->cnt_bbcr; idx++, addr++, data_a++)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "LPS BB CR - 0x%04x=0x%08x\n",
			    le16_to_cpu(*addr), le32_to_cpu(*data_a));

	/* BBMCUCR section follows immediately */
	addr = (const void *)data_a;
	data_a = (const void *)(addr + c2h_rpt->cnt_bbmcucr);
	for (idx = 0; idx < c2h_rpt->cnt_bbmcucr; idx++, addr++, data_a++)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "LPS BBMCU - 0x%04x=0x%08x\n",
			    le16_to_cpu(*addr), le32_to_cpu(*data_a));

	/* RFCR section: addresses, then two value arrays back to back */
	addr = (const void *)data_a;
	data_a = (const void *)(addr + c2h_rpt->cnt_rfcr);
	data_b = (const void *)(data_a + c2h_rpt->cnt_rfcr);
	for (idx = 0; idx < c2h_rpt->cnt_rfcr; idx++, addr++, data_a++, data_b++)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "LPS RFCR - 0x%04x=0x%05x,0x%05x\n",
			    le16_to_cpu(*addr), le32_to_cpu(*data_a),
			    le32_to_cpu(*data_b));
}
3439 
/* Debug-dump the firmware scan report: band/channel plus the OFDM and CCK
 * packet-detection lower bounds chosen by firmware during scan.
 */
static void
rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_fw_scan_rpt *c2h_rpt =
		(const struct rtw89_c2h_fw_scan_rpt *)c2h->data;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "%s: band: %u, op_chan: %u, PD_low_bd(ofdm, cck): (-%d, %d), phy_idx: %u\n",
		    __func__, c2h_rpt->band, c2h_rpt->center_ch,
		    PD_LOWER_BOUND_BASE - (c2h_rpt->ofdm_pd_idx << 1),
		    c2h_rpt->cck_pd_idx, c2h_rpt->phy_idx);
}
3452 
/* Dispatch table for DM-class C2H events, indexed by function code.
 * NULL entries are unsupported; dummy handler entries are acknowledged
 * but intentionally ignored.
 */
static
void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_DM_FUNC_FW_TEST] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty,
	[RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_LPS] = rtw89_phy_c2h_lps_rpt,
	[RTW89_PHY_C2H_DM_FUNC_ENV_MNTR] = rtw89_fw_c2h_dummy_handler,
	[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
3465 
/* Update the TAS (Time Averaged SAR) instantaneous TX power from a firmware
 * report: average the reported quarter-dB power history in the linear
 * domain. No-op when TAS is disabled or no SAR source is configured.
 */
static
void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev,
			       const struct rtw89_c2h_rf_tas_rpt_log *content)
{
	const enum rtw89_sar_sources src = rtwdev->sar.src;
	struct rtw89_tas_info *tas = &rtwdev->tas;
	u64 linear = 0;
	u32 i, cur_idx;
	s16 txpwr;

	if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
		return;

	/* NOTE(review): cur_idx comes from firmware and is not clamped to
	 * the size of txpwr_history here — presumably bounded by the report
	 * format; verify against the struct definition.
	 */
	cur_idx = le32_to_cpu(content->cur_idx);
	for (i = 0; i < cur_idx; i++) {
		txpwr = le16_to_cpu(content->txpwr_history[i]);
		/* sum in linear power so the average is physically meaningful */
		linear += rtw89_db_quarter_to_linear(txpwr);

		rtw89_debug(rtwdev, RTW89_DBG_SAR,
			    "tas: index: %u, txpwr: %d\n", i, txpwr);
	}

	/* empty history: fall back to 0 dB in linear form */
	if (cur_idx == 0)
		tas->instant_txpwr = rtw89_db_to_linear(0);
	else
		tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
}
3493 
/* Decode and debug-dump an RFK (RF calibration) report, dispatching on the
 * calibration function code. Each case first validates the payload length
 * against the expected report struct; mismatches fall through to the
 * "unexpected" log at the end. Except for TAS_PWR (which updates TAS state
 * via rtw89_phy_c2h_rfk_tas_pwr), this function only emits debug logs.
 */
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
	struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
	struct rtw89_c2h_rf_txiqk_rpt_log *txiqk;
	struct rtw89_c2h_rf_cim3k_rpt_log *cim3k;
	struct rtw89_c2h_rf_dack_rpt_log *dack;
	struct rtw89_c2h_rf_tssi_rpt_log *tssi;
	struct rtw89_c2h_rf_dpk_rpt_log *dpk;
	struct rtw89_c2h_rf_iqk_rpt_log *iqk;
	int i, j, k;

	switch (func) {
	case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
		if (len != sizeof(*iqk))
			goto out;

		/* IQ calibration: global flags, then per-path (2 paths) state */
		iqk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_init = %x\n", iqk->is_iqk_init);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_reload = %x\n", iqk->is_reload);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_nbiqk = %x\n", iqk->is_nbiqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->txiqk_en = %x\n", iqk->txiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->rxiqk_en = %x\n", iqk->rxiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->lok_en = %x\n", iqk->lok_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_xym_en = %x\n", iqk->iqk_xym_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_sram_en = %x\n", iqk->iqk_sram_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_fft_en = %x\n", iqk->iqk_fft_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_fw_iqk = %x\n", iqk->is_fw_iqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_enable = %x\n", iqk->is_iqk_enable);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_cfir_en = %x\n", iqk->iqk_cfir_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->thermal_rek_en = %x\n", iqk->thermal_rek_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->version = %x\n", iqk->version);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->phy = %x\n", iqk->phy);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->fwk_status = %x\n", iqk->fwk_status);

		for (i = 0; i < 2; i++) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[IQK] ======== Path %x  ========\n", i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_band[%d] = %x\n",
				    i, iqk->iqk_band[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_ch[%d] = %x\n",
				    i, iqk->iqk_ch[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n",
				    i, iqk->iqk_bw[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->rf_0x18[%d] = %x\n",
				    i, le32_to_cpu(iqk->rf_0x18[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_idac[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_vbuf[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_tx_fail[%d] = %x\n",
				    i, iqk->iqk_tx_fail[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n",
				    i, iqk->iqk_rx_fail[i]);
			for (j = 0; j < 6; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rftxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rftxgain[i][j]));
			for (j = 0; j < 6; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->tx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->tx_xym[i][j]));
			for (j = 0; j < 6; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rfrxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rfrxgain[i][j]));
			for (j = 0; j < 6; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rx_xym[i][j]));

			/* wideband XY results only valid when xym was enabled */
			if (!iqk->iqk_xym_en)
				continue;

			for (j = 0; j < 32; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rx_wb_xym[%d][%d] = %x\n",
					    i, j, iqk->rx_wb_xym[i][j]);
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		if (len != sizeof(*dpk))
			goto out;

		/* digital pre-distortion calibration summary */
		dpk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
			    dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
			    dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
			    dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
		if (len != sizeof(*dack))
			goto out;

		/* DAC calibration summary, per-path (S0/S1) and per I/Q */
		dack = content;

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]FWDACK SUMMARY!!!!!\n");
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]FWDACK ver = 0x%x, FWDACK rpt_ver = 0x%x, driver rpt_ver = 0x%x\n",
			    dack->fwdack_ver, dack->fwdack_info_ver, 0x2);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]timeout code = [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
			    dack->addck_timeout, dack->cdack_timeout, dack->dadck_timeout,
			    dack->adgaink_timeout, dack->msbk_timeout);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]DACK fail = 0x%x\n", dack->dack_fail);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]S0 WBADCK = [0x%x]\n", dack->wbdck_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]S1 WBADCK = [0x%x]\n", dack->wbdck_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]DRCK = [0x%x]\n", dack->rck_d);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]);

		/* ADC DC offsets are split into high/low byte halves */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[0][0][0] << 8) | dack->addck2_ld[0][0][0],
			    ((u32)dack->addck2_hd[0][0][1] << 8) | dack->addck2_ld[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[0][1][0] << 8) | dack->addck2_ld[0][1][0],
			    ((u32)dack->addck2_hd[0][1][1] << 8) | dack->addck2_ld[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[1][0][0] << 8) | dack->addck2_ld[1][0][0],
			    ((u32)dack->addck2_hd[1][0][1] << 8) | dack->addck2_ld[1][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n",
			    ((u32)dack->addck2_hd[1][1][0] << 8) | dack->addck2_ld[1][1][0],
			    ((u32)dack->addck2_hd[1][1][1] << 8) | dack->addck2_ld[1][1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
			    dack->adgaink_d[0][0], dack->adgaink_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
			    dack->adgaink_d[1][0], dack->adgaink_d[1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
			    dack->dadck_d[0][0], dack->dadck_d[0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
			    dack->dadck_d[1][0], dack->dadck_d[1][1]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n",
			    ((u32)dack->biask_hd[0][0] << 8) | dack->biask_ld[0][0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n",
			    ((u32)dack->biask_hd[1][0] << 8) | dack->biask_ld[1][0]);

		/* 16-entry MSB calibration tables per path and I/Q */
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[0][0][i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[0][1][i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[1][0][i]);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
		for (i = 0; i < 0x10; i++)
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
				    dack->msbk_d[1][1][i]);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
		if (len != sizeof(*rxdck))
			goto out;

		/* RX DC offset calibration summary */
		rxdck = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n",
			    rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch,
			    rxdck->timeout);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
		if (len != sizeof(*tssi))
			goto out;

		/* TSSI alignment results: [path][?][entry] tables */
		tssi = content;
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 2; j++) {
				for (k = 0; k < 4; k++) {
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power_cw_h[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power_cw_l[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw[%d][%d][%d]=%d\n",
						    i, j, k,
						    (tssi->alignment_power_cw_h[i][j][k] << 8) +
						     tssi->alignment_power_cw_l[i][j][k]);
				}

				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] tssi_alimk_state[%d][%d]=%d\n",
					    i, j, tssi->tssi_alimk_state[i][j]);
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] default_txagc_offset[%d]=%d\n",
					    j, tssi->default_txagc_offset[0][j]);
			}
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
		if (len != sizeof(*txgapk))
			goto out;

		/* TX gap calibration summary */
		txgapk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
			    le32_to_cpu(txgapk->r0x8010[0]),
			    le32_to_cpu(txgapk->r0x8010[1]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
			    txgapk->chk_id);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
			    le32_to_cpu(txgapk->chk_cnt));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
			    txgapk->ver);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt d_bnd_ok = %d\n",
			    txgapk->d_bnd_ok);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt stage[0] = 0x%x\n",
			    le32_to_cpu(txgapk->stage[0]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt stage[1] = 0x%x\n",
			    le32_to_cpu(txgapk->stage[1]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]failcode[0] = 0x%x\n",
			    le16_to_cpu(txgapk->failcode[0]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]failcode[1] = 0x%x\n",
			    le16_to_cpu(txgapk->failcode[1]));

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
			    (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
			    (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
			    (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
			    (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR:
		if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log))
			goto out;

		/* the only case with a side effect: updates TAS state */
		rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK:
		/* length-validated only; no dump is produced for these yet */
		if (len != sizeof(*txiqk))
			goto out;
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K:
		if (len != sizeof(*cim3k))
			goto out;
		return;
	default:
		break;
	}

out:
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "unexpected RFK func %d report log with length %d\n", func, len);
}
3787 
3788 static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
3789 				      enum rtw89_phy_c2h_rfk_log_func func,
3790 				      void *content, u16 len)
3791 {
3792 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
3793 	const struct rtw89_c2h_rf_run_log *log = content;
3794 	const struct rtw89_fw_element_hdr *elm;
3795 	u32 fmt_idx;
3796 	u16 offset;
3797 
3798 	if (sizeof(*log) != len)
3799 		return false;
3800 
3801 	if (!elm_info->rfk_log_fmt)
3802 		return false;
3803 
3804 	elm = elm_info->rfk_log_fmt->elm[func];
3805 	fmt_idx = le32_to_cpu(log->fmt_idx);
3806 	if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
3807 		return false;
3808 
3809 	offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
3810 	if (offset == 0)
3811 		return false;
3812 
3813 	rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
3814 		    le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
3815 		    le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));
3816 
3817 	return true;
3818 }
3819 
static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
				  u32 len, enum rtw89_phy_c2h_rfk_log_func func,
				  const char *rfk_name)
{
	struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_c2h_rf_log_hdr *log_hdr;
	void *log_ptr = c2h_hdr;
	u16 content_len;
	u16 chunk_len;
	bool handled;

	/* Walk the sequence of RF log chunks packed after the C2H header.
	 * Each chunk is a struct rtw89_c2h_rf_log_hdr immediately followed
	 * by log_hdr->len bytes of payload.
	 */
	log_ptr += sizeof(*c2h_hdr);
	len -= sizeof(*c2h_hdr);

	while (len > sizeof(*log_hdr)) {
		log_hdr = log_ptr;
		content_len = le16_to_cpu(log_hdr->len);
		chunk_len = content_len + sizeof(*log_hdr);

		/* Truncated or malformed chunk: stop parsing the rest. */
		if (chunk_len > len)
			break;

		switch (log_hdr->type) {
		case RTW89_RF_RUN_LOG:
			handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
							    log_hdr->content, content_len);
			if (handled)
				break;

			/* No format-table entry available; dump raw bytes. */
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
				    rfk_name, content_len, log_hdr->content);
			break;
		case RTW89_RF_RPT_LOG:
			rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
						  log_hdr->content, content_len);
			break;
		default:
			/* Unknown chunk type: following offsets can't be
			 * trusted, so abandon the remainder of the buffer.
			 */
			return;
		}

		log_ptr += chunk_len;
		len -= chunk_len;
	}
}
3864 
/* Thin per-calibration wrappers: each tags an incoming RFK log C2H event
 * with the matching log-function enum and a printable name before handing
 * it to the common parser rtw89_phy_c2h_rfk_log().
 */
static void
rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
}

static void
rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
}

static void
rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
}

static void
rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
}

static void
rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
}

static void
rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}

static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
}

static void
rtw89_phy_c2h_rfk_log_txiqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK, "TXIQK");
}

static void
rtw89_phy_c2h_rfk_log_cim3k(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K, "CIM3K");
}
3927 
/* Dispatch table for RTW89_PHY_C2H_RFK_LOG events, indexed by the C2H
 * function id; consumed by rtw89_phy_c2h_handle().
 */
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
					       struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK] = rtw89_phy_c2h_rfk_log_txiqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K] = rtw89_phy_c2h_rfk_log_cim3k,
};
3941 
3942 static
3943 void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
3944 {
3945 	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
3946 
3947 	wait->state = RTW89_RFK_STATE_START;
3948 	wait->start_time = ktime_get();
3949 	reinit_completion(&wait->completion);
3950 }
3951 
3952 static
3953 int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
3954 			      unsigned int ms)
3955 {
3956 	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
3957 	unsigned long time_left;
3958 
3959 	/* Since we can't receive C2H event during SER, use a fixed delay. */
3960 	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
3961 		fsleep(1000 * ms / 2);
3962 		goto out;
3963 	}
3964 
3965 	time_left = wait_for_completion_timeout(&wait->completion,
3966 						msecs_to_jiffies(ms));
3967 	if (time_left == 0) {
3968 		rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
3969 		return -ETIMEDOUT;
3970 	} else if (wait->state != RTW89_RFK_STATE_OK) {
3971 		rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
3972 			   rfk_name, wait->state);
3973 		return -EFAULT;
3974 	}
3975 
3976 out:
3977 	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
3978 		    rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
3979 
3980 	return 0;
3981 }
3982 
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rfk_report *report =
		(const struct rtw89_c2h_rfk_report *)c2h->data;
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	/* Publish the firmware-reported RFK state/version, then wake the
	 * waiter blocked in rtw89_phy_rfk_report_wait().
	 */
	wait->state = report->state;
	wait->version = report->version;

	complete(&wait->completion);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "RFK report state %d with version %d (%*ph)\n",
		    wait->state, wait->version,
		    (int)(len - sizeof(report->hdr)), &report->state);
}
4000 
/* Forward a TAS power report C2H straight to the common TAS log parser. */
static void
rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rf_tas_info *report =
		(const struct rtw89_c2h_rf_tas_info *)c2h->data;

	rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content);
}

/* Dispatch table for RTW89_PHY_C2H_RFK_REPORT events, indexed by the C2H
 * function id; consumed by rtw89_phy_c2h_handle().
 */
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr,
};
4016 
4017 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
4018 {
4019 	switch (class) {
4020 	case RTW89_PHY_C2H_RFK_LOG:
4021 		switch (func) {
4022 		case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
4023 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
4024 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
4025 		case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
4026 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
4027 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
4028 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK:
4029 			return true;
4030 		default:
4031 			return false;
4032 		}
4033 	case RTW89_PHY_C2H_RFK_REPORT:
4034 		switch (func) {
4035 		case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
4036 			return true;
4037 		default:
4038 			return false;
4039 		}
4040 	default:
4041 		return false;
4042 	}
4043 }
4044 
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	/* Dispatch a PHY C2H event to the handler registered for its
	 * (class, func) pair; unknown pairs are logged once and dropped.
	 */
	void (*handler)(struct rtw89_dev *rtwdev,
			struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_ra_handler))
			handler = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_LOG:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
			handler = rtw89_phy_c2h_rfk_log_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_REPORT:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
			handler = rtw89_phy_c2h_rfk_report_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_dm_handler))
			handler = rtw89_phy_c2h_dm_handler[func];
		break;
	default:
		break;
	}
	if (!handler) {
		/* A table may have holes, so handler can still be NULL even
		 * when func is within range.
		 */
		rtw89_info_once(rtwdev, "PHY c2h class %d func %d not support\n",
				class, func);
		return;
	}
	handler(rtwdev, skb, len);
}
4078 
4079 int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
4080 				    enum rtw89_phy_idx phy_idx,
4081 				    unsigned int ms)
4082 {
4083 	int ret;
4084 
4085 	if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, &rtwdev->fw)) {
4086 		rtw89_phy_rfk_report_prep(rtwdev);
4087 		rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
4088 		ret = rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
4089 		if (ret)
4090 			return ret;
4091 	}
4092 
4093 	if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY_MCC, &rtwdev->fw)) {
4094 		ret = rtw89_fw_h2c_rf_pre_ntfy_mcc(rtwdev, phy_idx);
4095 		if (ret)
4096 			return ret;
4097 	}
4098 
4099 	return 0;
4100 
4101 }
4102 EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
4103 
/* The rtw89_phy_rfk_*_and_wait() helpers below all follow the same
 * pattern: arm the RFK completion (rtw89_phy_rfk_report_prep), send the
 * H2C that triggers the calibration in firmware, then block up to @ms for
 * the firmware's completion report (rtw89_phy_rfk_report_wait).  Each
 * returns 0 on success or a negative error code from the H2C send or the
 * wait.
 */
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				enum rtw89_tssi_mode tssi_mode,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);

int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);

int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);

int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy_idx,
				  const struct rtw89_chan *chan,
				  unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);

int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);

int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 bool is_chl_k, unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan, is_chl_k);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);

int rtw89_phy_rfk_txiqk_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txiqk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TX_IQK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_txiqk_and_wait);

int rtw89_phy_rfk_cim3k_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_cim3k(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "CIM3k", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_cim3k_and_wait);
4240 
4241 static u32 phy_tssi_get_cck_group(u8 ch)
4242 {
4243 	switch (ch) {
4244 	case 1 ... 2:
4245 		return 0;
4246 	case 3 ... 5:
4247 		return 1;
4248 	case 6 ... 8:
4249 		return 2;
4250 	case 9 ... 11:
4251 		return 3;
4252 	case 12 ... 13:
4253 		return 4;
4254 	case 14:
4255 		return 5;
4256 	}
4257 
4258 	return 0;
4259 }
4260 
/* TSSI group lookups return either a plain group index, or - for channels
 * lying between two calibration groups - an index tagged with
 * PHY_TSSI_EXTRA_GROUP_BIT.  For a tagged value, IDX1/IDX2 recover the two
 * adjacent group indices whose DE values are averaged by the callers.
 */
#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
	((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
	(PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
4268 
/* Map a 2/5 GHz channel number to its OFDM/MCS TSSI DE group.  Channels in
 * the gaps between two calibrated groups return a PHY_TSSI_EXTRA_GROUP()
 * tagged value so the caller averages the neighboring groups; unknown
 * channels fall back to group 0.
 */
static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
4336 
/* 6 GHz counterpart of phy_tssi_get_ofdm_group(): map a 6 GHz channel to
 * its OFDM/MCS TSSI DE group, with PHY_TSSI_EXTRA_GROUP() tagging for
 * channels between two calibrated groups; unknown channels fall back to
 * group 0.
 */
static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 5:
		return 0;
	case 6 ... 8:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 9 ... 13:
		return 1;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(1);
	case 17 ... 21:
		return 2;
	case 22 ... 24:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 25 ... 29:
		return 3;
	case 33 ... 37:
		return 4;
	case 38 ... 40:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 41 ... 45:
		return 5;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 49 ... 53:
		return 6;
	case 54 ... 56:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 57 ... 61:
		return 7;
	case 65 ... 69:
		return 8;
	case 70 ... 72:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 73 ... 77:
		return 9;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 81 ... 85:
		return 10;
	case 86 ... 88:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 89 ... 93:
		return 11;
	case 97 ... 101:
		return 12;
	case 102 ... 104:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 105 ... 109:
		return 13;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 113 ... 117:
		return 14;
	case 118 ... 120:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 121 ... 125:
		return 15;
	case 129 ... 133:
		return 16;
	case 134 ... 136:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 137 ... 141:
		return 17;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 145 ... 149:
		return 18;
	case 150 ... 152:
		return PHY_TSSI_EXTRA_GROUP(18);
	case 153 ... 157:
		return 19;
	case 161 ... 165:
		return 20;
	case 166 ... 168:
		return PHY_TSSI_EXTRA_GROUP(20);
	case 169 ... 173:
		return 21;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(21);
	case 177 ... 181:
		return 22;
	case 182 ... 184:
		return PHY_TSSI_EXTRA_GROUP(22);
	case 185 ... 189:
		return 23;
	case 193 ... 197:
		return 24;
	case 198 ... 200:
		return PHY_TSSI_EXTRA_GROUP(24);
	case 201 ... 205:
		return 25;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(25);
	case 209 ... 213:
		return 26;
	case 214 ... 216:
		return PHY_TSSI_EXTRA_GROUP(26);
	case 217 ... 221:
		return 27;
	case 225 ... 229:
		return 28;
	case 230 ... 232:
		return PHY_TSSI_EXTRA_GROUP(28);
	case 233 ... 237:
		return 29;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(29);
	case 241 ... 245:
		return 30;
	case 246 ... 248:
		return PHY_TSSI_EXTRA_GROUP(30);
	case 249 ... 253:
		return 31;
	}

	return 0;
}
4456 
/* Map a 2/5 GHz channel number to its TSSI trim group (coarser than the
 * DE groups above), with PHY_TSSI_EXTRA_GROUP() tagging for in-between
 * channels; unknown channels fall back to group 0.
 */
static u32 phy_tssi_get_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}
4484 
/* 6 GHz counterpart of phy_tssi_get_trim_group(): map a 6 GHz channel to
 * its TSSI trim group, with PHY_TSSI_EXTRA_GROUP() tagging for in-between
 * channels; unknown channels fall back to group 0.
 */
static u32 phy_tssi_get_6g_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 13:
		return 0;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 17 ... 29:
		return 1;
	case 33 ... 45:
		return 2;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 49 ... 61:
		return 3;
	case 65 ... 77:
		return 4;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 81 ... 93:
		return 5;
	case 97 ... 109:
		return 6;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 113 ... 125:
		return 7;
	case 129 ... 141:
		return 8;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 145 ... 157:
		return 9;
	case 161 ... 173:
		return 10;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 177 ... 189:
		return 11;
	case 193 ... 205:
		return 12;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 209 ... 221:
		return 13;
	case 225 ... 237:
		return 14;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 241 ... 253:
		return 15;
	}

	return 0;
}
4540 
4541 static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
4542 			       enum rtw89_phy_idx phy,
4543 			       const struct rtw89_chan *chan,
4544 			       enum rtw89_rf_path path)
4545 {
4546 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4547 	enum rtw89_band band = chan->band_type;
4548 	u8 ch = chan->channel;
4549 	u32 gidx_1st;
4550 	u32 gidx_2nd;
4551 	s8 de_1st;
4552 	s8 de_2nd;
4553 	u32 gidx;
4554 	s8 val;
4555 
4556 	if (band == RTW89_BAND_6G)
4557 		goto calc_6g;
4558 
4559 	gidx = phy_tssi_get_ofdm_group(ch);
4560 
4561 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4562 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
4563 		    path, gidx);
4564 
4565 	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
4566 		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
4567 		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
4568 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
4569 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
4570 		val = (de_1st + de_2nd) / 2;
4571 
4572 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4573 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
4574 			    path, val, de_1st, de_2nd);
4575 	} else {
4576 		val = tssi_info->tssi_mcs[path][gidx];
4577 
4578 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4579 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
4580 	}
4581 
4582 	return val;
4583 
4584 calc_6g:
4585 	gidx = phy_tssi_get_6g_ofdm_group(ch);
4586 
4587 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4588 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
4589 		    path, gidx);
4590 
4591 	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
4592 		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
4593 		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
4594 		de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
4595 		de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
4596 		val = (de_1st + de_2nd) / 2;
4597 
4598 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4599 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
4600 			    path, val, de_1st, de_2nd);
4601 	} else {
4602 		val = tssi_info->tssi_6g_mcs[path][gidx];
4603 
4604 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4605 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
4606 	}
4607 
4608 	return val;
4609 }
4610 
4611 static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
4612 				    enum rtw89_phy_idx phy,
4613 				    const struct rtw89_chan *chan,
4614 				    enum rtw89_rf_path path)
4615 {
4616 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4617 	enum rtw89_band band = chan->band_type;
4618 	u8 ch = chan->channel;
4619 	u32 tgidx_1st;
4620 	u32 tgidx_2nd;
4621 	s8 tde_1st;
4622 	s8 tde_2nd;
4623 	u32 tgidx;
4624 	s8 val;
4625 
4626 	if (band == RTW89_BAND_6G)
4627 		goto calc_6g;
4628 
4629 	tgidx = phy_tssi_get_trim_group(ch);
4630 
4631 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4632 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
4633 		    path, tgidx);
4634 
4635 	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
4636 		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
4637 		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
4638 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
4639 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
4640 		val = (tde_1st + tde_2nd) / 2;
4641 
4642 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4643 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
4644 			    path, val, tde_1st, tde_2nd);
4645 	} else {
4646 		val = tssi_info->tssi_trim[path][tgidx];
4647 
4648 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4649 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
4650 			    path, val);
4651 	}
4652 
4653 	return val;
4654 
4655 calc_6g:
4656 	tgidx = phy_tssi_get_6g_trim_group(ch);
4657 
4658 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4659 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
4660 		    path, tgidx);
4661 
4662 	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
4663 		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
4664 		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
4665 		tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
4666 		tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
4667 		val = (tde_1st + tde_2nd) / 2;
4668 
4669 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4670 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
4671 			    path, val, tde_1st, tde_2nd);
4672 	} else {
4673 		val = tssi_info->tssi_trim_6g[path][tgidx];
4674 
4675 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4676 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
4677 			    path, val);
4678 	}
4679 
4680 	return val;
4681 }
4682 
/* Fill the TSSI H2C command with per-path CCK/OFDM DE values derived from
 * efuse calibration data plus the trim offset for the current channel.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
					       enum rtw89_phy_idx phy,
					       const struct rtw89_chan *chan,
					       struct rtw89_h2c_rf_tssi *h2c)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	s8 trim_de;
	s8 ofdm_de;
	s8 cck_de;
	u8 gidx;
	s8 val;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
		trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
		h2c->curr_tssi_trim_de[i] = trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);

		gidx = phy_tssi_get_cck_group(ch);
		cck_de = tssi_info->tssi_cck[i][gidx];
		/* keep only the low byte of the efuse DE + trim sum */
		val = u32_get_bits(cck_de + trim_de, 0xff);

		if (chip->chip_id == RTL8922A) {
			/* 8922A firmware takes per-bandwidth DEs instead of
			 * the single legacy field, which is zeroed.
			 */
			h2c->curr_tssi_cck_de[i] = 0x0;
			h2c->curr_tssi_cck_de_20m[i] = val;
			h2c->curr_tssi_cck_de_40m[i] = val;
		} else {
			h2c->curr_tssi_cck_de[i] = val;
		}

		h2c->curr_tssi_efuse_cck_de[i] = cck_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);

		ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
		val = u32_get_bits(ofdm_de + trim_de, 0xff);

		if (chip->chip_id == RTL8922A) {
			h2c->curr_tssi_ofdm_de[i] = 0x0;
			h2c->curr_tssi_ofdm_de_20m[i] = val;
			h2c->curr_tssi_ofdm_de_40m[i] = val;
			h2c->curr_tssi_ofdm_de_80m[i] = val;
			h2c->curr_tssi_ofdm_de_160m[i] = val;
			h2c->curr_tssi_ofdm_de_320m[i] = val;
		} else {
			h2c->curr_tssi_ofdm_de[i] = val;
		}

		h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
	}
}
4745 
/* Build the per-path 128-entry thermal compensation table for the TSSI H2C
 * command from the firmware-element tx-power tracking deltas matching the
 * channel's subband.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
					      enum rtw89_phy_idx phy,
					      const struct rtw89_chan *chan,
					      struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const s8 *thm_up[RF_PATH_B + 1] = {};
	const s8 *thm_down[RF_PATH_B + 1] = {};
	u8 subband = chan->subband_type;
	s8 thm_ofst[128] = {};
	int multiplier;
	u8 thermal;
	u8 path;
	u8 i, j;

	/* Select the up/down delta-swing tables for both paths by subband. */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] tmeter tbl on subband: %u\n", subband);

	/* 8922A firmware expects the opposite sign convention for the
	 * offsets compared to older chips.
	 */
	if (chip->chip_id == RTL8922A)
		multiplier = 1;
	else
		multiplier = -1;

	for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
		thermal = tssi_info->thermal[path];
		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "path: %u, pg thermal: 0x%x\n", path, thermal);

		if (thermal == 0xff) {
			/* No thermal value programmed in efuse: use the 0x38
			 * fallback and an all-zero compensation table.
			 */
			h2c->pg_thermal[path] = 0x38;
			memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
			continue;
		}

		h2c->pg_thermal[path] = thermal;

		/* Entries 0..63: rising-thermal deltas, clamped to the last
		 * delta-swing entry once the table runs out.
		 */
		i = 0;
		for (j = 0; j < 64; j++) {
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      thm_up[path][i++] :
				      thm_up[path][DELTA_SWINGIDX_SIZE - 1];
			thm_ofst[j] *= multiplier;
		}

		/* Entries 127..64: negated falling-thermal deltas, filled
		 * backwards and clamped the same way.
		 */
		i = 1;
		for (j = 127; j >= 64; j--) {
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      -thm_down[path][i++] :
				      -thm_down[path][DELTA_SWINGIDX_SIZE - 1];
			thm_ofst[j] *= multiplier;
		}

		/* Pack into the H2C with byte order reversed within each
		 * 4-byte group, as the firmware expects.
		 */
		for (i = 0; i < 128; i += 4) {
			h2c->ftable[path][i + 0] = thm_ofst[i + 3];
			h2c->ftable[path][i + 1] = thm_ofst[i + 2];
			h2c->ftable[path][i + 2] = thm_ofst[i + 1];
			h2c->ftable[path][i + 3] = thm_ofst[i + 0];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "thm ofst [%x]: %02x %02x %02x %02x\n",
				    i, thm_ofst[i], thm_ofst[i + 1],
				    thm_ofst[i + 2], thm_ofst[i + 3]);
		}
	}
}
4869 
4870 static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
4871 {
4872 	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
4873 	u32 reg_mask;
4874 
4875 	if (sc_xo)
4876 		reg_mask = xtal->sc_xo_mask;
4877 	else
4878 		reg_mask = xtal->sc_xi_mask;
4879 
4880 	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
4881 }
4882 
4883 static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
4884 				       u8 val)
4885 {
4886 	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
4887 	u32 reg_mask;
4888 
4889 	if (sc_xo)
4890 		reg_mask = xtal->sc_xo_mask;
4891 	else
4892 		reg_mask = xtal->sc_xi_mask;
4893 
4894 	rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
4895 }
4896 
/* Program a new crystal capacitance value into hardware and update the
 * software tracking state from the value actually read back.
 * @force: rewrite even when the cached value already matches.
 */
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val = 0, sc_xo_val = 0;

	/* Avoid redundant hardware access unless the caller forces it. */
	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	/* 8852A/8851B expose the XTAL cap via a direct register; other
	 * chips go through the XTAL_SI indirect interface on the MAC.
	 */
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	/* Cache what hardware reports (XI side), not the requested value. */
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}
4928 
/* Begin returning the crystal cap toward its default value.
 * Each invocation moves one step closer to def_x_cap rather than
 * jumping directly, so repeated calls converge gradually.
 */
static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 cap;

	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
	cfo->is_adjust = false;
	if (cfo->crystal_cap == cfo->def_x_cap)
		return;
	cap = cfo->crystal_cap;
	/* Step one unit toward the default, from either direction. */
	cap += (cap > cfo->def_x_cap ? -1 : 1);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
		    cfo->def_x_cap);
}
4945 
/* Apply digital CFO compensation for the current average CFO.
 * No-op when the chip has no dcfo_comp register or nothing is associated.
 */
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (!dcfo_comp)
		return;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	sign = curr_cfo > 0 ? 1 : -1;
	/* Scale curr_cfo by /625 and bias by the current register value,
	 * signed toward the measured CFO; presumably converts to the
	 * hardware's 312.5 kHz step domain - TODO confirm units.
	 */
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	/* 8852A cut CBV applies the compensation with inverted polarity. */
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}
4974 
/* One-time setup of the digital CFO compensation path.
 * Generation-specific: AX chips additionally select between hardware
 * UL CFO compensation and the software/DCFO path.
 */
static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_cfo_regs *cfo = phy->cfo;

	/* Enable segment-0 compensation and set its weighting. */
	rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8);

	if (chip->chip_gen == RTW89_CHIP_AX) {
		if (chip->cfo_hw_comp) {
			/* Hardware handles UL CFO compensation. */
			rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
					   B_AX_PWR_UL_CFO_MASK, 0x6);
		} else {
			/* Software path: enable DCFO, clear the HW knob. */
			rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
			rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2,
					  B_AX_PWR_UL_CFO_MASK);
		}
	}
}
4995 
/* Initialize all CFO tracking state from efuse defaults and program
 * the default crystal capacitance into hardware.
 */
static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	/* Allow tracking only within +/-CFO_BOUND of the default cap,
	 * clamped to the valid register range.
	 */
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
		    cfo->crystal_cap_default);
	/* Force a hardware write even though the cached value matches. */
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}
5023 
5024 static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
5025 					     s32 curr_cfo)
5026 {
5027 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5028 	int crystal_cap = cfo->crystal_cap;
5029 	s32 cfo_abs = abs(curr_cfo);
5030 	int sign;
5031 
5032 	if (curr_cfo == 0) {
5033 		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
5034 		return;
5035 	}
5036 	if (!cfo->is_adjust) {
5037 		if (cfo_abs > CFO_TRK_ENABLE_TH)
5038 			cfo->is_adjust = true;
5039 	} else {
5040 		if (cfo_abs <= CFO_TRK_STOP_TH)
5041 			cfo->is_adjust = false;
5042 	}
5043 	if (!cfo->is_adjust) {
5044 		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
5045 		return;
5046 	}
5047 	sign = curr_cfo > 0 ? 1 : -1;
5048 	if (cfo_abs > CFO_TRK_STOP_TH_4)
5049 		crystal_cap += 3 * sign;
5050 	else if (cfo_abs > CFO_TRK_STOP_TH_3)
5051 		crystal_cap += 3 * sign;
5052 	else if (cfo_abs > CFO_TRK_STOP_TH_2)
5053 		crystal_cap += 1 * sign;
5054 	else if (cfo_abs > CFO_TRK_STOP_TH_1)
5055 		crystal_cap += 1 * sign;
5056 	else
5057 		return;
5058 
5059 	crystal_cap = clamp(crystal_cap, 0, 127);
5060 	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
5061 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
5062 		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
5063 		    cfo->crystal_cap, cfo->def_x_cap);
5064 }
5065 
5066 static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
5067 {
5068 	const struct rtw89_chip_info *chip = rtwdev->chip;
5069 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5070 	s32 cfo_khz_all = 0;
5071 	s32 cfo_cnt_all = 0;
5072 	s32 cfo_all_avg = 0;
5073 	u8 i;
5074 
5075 	if (rtwdev->total_sta_assoc != 1)
5076 		return 0;
5077 	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
5078 	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
5079 		if (cfo->cfo_cnt[i] == 0)
5080 			continue;
5081 		cfo_khz_all += cfo->cfo_tail[i];
5082 		cfo_cnt_all += cfo->cfo_cnt[i];
5083 		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
5084 		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
5085 		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
5086 					cfo_cnt_all);
5087 	}
5088 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
5089 		    "CFO track for macid = %d\n", i);
5090 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
5091 		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
5092 		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
5093 	return cfo_all_avg;
5094 }
5095 
/* Derive a single target CFO from multiple associated stations.
 * Three strategies are supported, selected by rtw89_multi_cfo_mode:
 * packet-count weighted, per-entry average, or throughput-weighted
 * with per-station tolerance windows.
 */
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		/* Average all samples together, weighting each station
		 * implicitly by its packet count.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		/* Average each station first, then average the stations. */
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		/* Throughput-weighted mode: intersect each station's
		 * tolerance window [avg - tol, avg + tol] and clamp the
		 * weighted average into it.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				/* No fresh samples: reuse last average. */
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		/* NOTE(review): cfo_khz_all_tp_wgt is never accumulated,
		 * so this weighted average is always 0 and target_cfo
		 * effectively comes from the clamp/fallback below -
		 * per-entry throughput weighting appears unfinished.
		 */
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg =  phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			/* Windows don't overlap: fall back to the plain
			 * per-station average.
			 */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}
5195 
5196 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
5197 {
5198 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5199 
5200 	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
5201 	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
5202 	cfo->packet_count = 0;
5203 	cfo->packet_count_pre = 0;
5204 	cfo->cfo_avg_pre = 0;
5205 }
5206 
/* One CFO tracking iteration: compute the current average CFO, adjust
 * the crystal cap, and apply digital compensation. Includes a
 * divergence lock that holds the cap at default for CFO_PERIOD_CNT
 * iterations when tracking hits the allowed cap bounds.
 */
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	/* Nothing to track: walk the cap back toward its default. */
	if (rtwdev->total_sta_assoc == 0 || rtw89_is_mlo_1_1(rtwdev)) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	/* No new packets since last iteration: stale statistics. */
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	/* While diverged, keep resetting until the lock period expires. */
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	/* Hitting the cap bounds means tracking diverged: lock it down. */
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update =  cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	/* A cap change already compensates part of the CFO; fine-tune
	 * the digital compensation toward zero by one step.
	 */
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}
5268 
/* Periodic CFO tracking worker; self re-arms while timer-driven
 * tracking is enabled. Runs under the wiphy lock.
 */
void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	lockdep_assert_wiphy(wiphy);

	/* Timer mode may have been disabled after this work was queued. */
	if (!cfo->cfo_trig_by_timer_en)
		return;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	/* Re-arm with the current period (it can change with CFO state). */
	wiphy_delayed_work_queue(wiphy, &rtwdev->cfo_track_work,
				 msecs_to_jiffies(cfo->cfo_timer_ms));
}
5284 
/* Kick off timer-driven CFO tracking using the configured period. */
static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->cfo_track_work,
				 msecs_to_jiffies(cfo->cfo_timer_ms));
}
5292 
/* Watchdog entry for CFO tracking. Drives a small state machine:
 * NORMAL (watchdog-paced) -> ENHANCE (fast timer when TX throughput is
 * high) -> HOLD (back off when UL OFDMA traffic dominates), and falls
 * back to NORMAL when throughput drops.
 */
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	/* Frequent trigger frames indicate UL OFDMA traffic. */
	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		/* High TX throughput: switch to fast, timer-driven mode. */
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	/* In timer mode the work item runs the DM itself. */
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}
5351 
5352 void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
5353 			 struct rtw89_rx_phy_ppdu *phy_ppdu)
5354 {
5355 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5356 	u8 macid = phy_ppdu->mac_id;
5357 
5358 	if (macid >= CFO_TRACK_MAX_USER) {
5359 		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
5360 		return;
5361 	}
5362 
5363 	cfo->cfo_tail[macid] += cfo_val;
5364 	cfo->cfo_cnt[macid]++;
5365 	cfo->packet_count++;
5366 }
5367 
/* Capture per-vif UL TB (trigger-based) defaults at association time:
 * the default TX-shape triangular index and whether dynamic band-edge
 * control applies on this channel. Only for chips with waveform ctrl.
 */
void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	rtwvif_link->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	/* 8852B after cut CBV never uses dynamic band-edge; otherwise it
	 * is enabled for >= 5 GHz bands at >= 40 MHz bandwidth.
	 */
	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif_link->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif_link->dyn_tb_bedge_en = true;
	else
		rtwvif_link->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}
5396 
/* Per-track-cycle snapshot collected from vif links and consumed by
 * rtw89_phy_ul_tb_waveform_ctrl().
 */
struct rtw89_phy_ul_tb_check_data {
	bool valid;		/* at least one STA link contributed data */
	bool high_tf_client;	/* trigger-frame rate above L2H threshold */
	bool low_tf_client;	/* trigger-frame rate below H2L threshold */
	bool dyn_tb_bedge_en;	/* dynamic band-edge control applies */
	u8 def_tri_idx;		/* default TX-shape triangular index */
};
5404 
/* Register values programmed by rtw89_phy_ofdma_power_diff();
 * one row per pwr_diff_en state (off/on).
 */
struct rtw89_phy_power_diff {
	u32 q_00;		/* Q-matrix element [0][0] (real part) */
	u32 q_11;		/* Q-matrix element [1][1] (real part) */
	u32 q_matrix_en;	/* enable custom Q-matrix */
	u32 ultb_1t_norm_160;	/* UL TB 1T normal BW160 power field */
	u32 ultb_2t_norm_160;	/* UL TB 2T normal BW160 power field */
	u32 com1_norm_1sts;	/* PATH_COM1 normal 1-STS field */
	u32 com2_resp_1sts_path; /* PATH_COM2 response 1-STS path field */
};
5414 
/* Program the OFDMA power-difference register set for this vif link,
 * selecting the on/off parameter row by pwr_diff_en. Only for chips
 * with the ul_tb_pwr_diff capability.
 */
static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	/* NOTE(review): when the state is unchanged, pwr_diff_en is
	 * cleared and programming is skipped - this reads as an
	 * edge-trigger with auto-rearm, but confirm the intent before
	 * restructuring.
	 */
	if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) {
		rtwvif_link->pwr_diff_en = false;
		return;
	}

	rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en;
	param = &table[rtwvif_link->pwr_diff_en];

	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	/* MAC-side UL TB power fields are per mac_idx. */
	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}
5459 
/* Gather UL TB tracking inputs from one associated STA vif link into
 * @ul_tb_data, and apply the OFDMA power-diff programming for it.
 * Non-station or unassociated links are ignored.
 */
static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif_link *rtwvif_link,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);

	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (rtwdev->chip->ul_tb_waveform_ctrl) {
		/* Classify current trigger-frame load with hysteresis
		 * thresholds (L2H above, H2L below).
		 */
		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
			ul_tb_data->high_tf_client = true;
		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
			ul_tb_data->low_tf_client = true;

		ul_tb_data->valid = true;
		ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx;
		ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en;
	}

	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link);
}
5487 
/* Toggle band-edge filtering and the triangular TX-shape based on the
 * trigger-frame load gathered in @ul_tb_data: disable both under heavy
 * UL OFDMA traffic, restore defaults when traffic is light.
 */
static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
		/* Between thresholds: leave the current setting alone. */
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}
5526 
/* Watchdog entry for UL TB control: collect per-link state and apply
 * waveform adjustments. Only active with exactly one associated STA
 * and on chips with either waveform-ctrl or pwr-diff capability.
 */
void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
			rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data);

	/* No link produced waveform data (e.g. pwr-diff-only chips). */
	if (!ul_tb_data.valid)
		return;

	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
}
5550 
/* Record the power-on band-edge setting as the default to restore,
 * and enable dynamic triangular TX-shape control.
 */
static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}
5563 
5564 static
5565 void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
5566 {
5567 	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
5568 	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
5569 	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
5570 	antdiv_sts->pkt_cnt_cck = 0;
5571 	antdiv_sts->pkt_cnt_ofdm = 0;
5572 	antdiv_sts->pkt_cnt_non_legacy = 0;
5573 	antdiv_sts->evm = 0;
5574 }
5575 
5576 static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
5577 					      struct rtw89_rx_phy_ppdu *phy_ppdu,
5578 					      struct rtw89_antdiv_stats *stats)
5579 {
5580 	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
5581 		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
5582 			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
5583 			stats->pkt_cnt_cck++;
5584 		} else {
5585 			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
5586 			stats->pkt_cnt_ofdm++;
5587 			stats->evm += phy_ppdu->ofdm.evm_min;
5588 		}
5589 	} else {
5590 		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
5591 		stats->pkt_cnt_non_legacy++;
5592 		stats->evm += phy_ppdu->ofdm.evm_min;
5593 	}
5594 }
5595 
5596 static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
5597 {
5598 	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
5599 	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
5600 		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
5601 	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
5602 		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
5603 		return ewma_rssi_read(&stats->ofdm_rssi_avg);
5604 	else
5605 		return ewma_rssi_read(&stats->cck_rssi_avg);
5606 }
5607 
/* Average accumulated EVM over OFDM plus non-legacy packet counts;
 * presumably phy_div() guards the zero-packet case - confirm helper.
 */
static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}
5612 
/* Feed one received PPDU into antenna-diversity statistics: always
 * into target_stats, and additionally into main/aux depending on the
 * currently selected RX antenna while a measurement is running.
 */
void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	/* Skip when diversity is absent or pinned to a fixed antenna. */
	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}
5632 
/* One-time BB register setup for antenna diversity on PHY 0.
 * Values follow the vendor init sequence; see register definitions for
 * field meanings.
 */
static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	/* Disable antenna-training and manual TX antenna selection. */
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	/* Keep hardware antenna switching active even under GNT_BT. */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	/* Route BTG TRX antenna selection; leave HW/SW 2G/5G selects off. */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}
5660 
5661 static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
5662 {
5663 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
5664 
5665 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
5666 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
5667 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
5668 }
5669 
5670 static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
5671 {
5672 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
5673 	struct rtw89_hal *hal = &rtwdev->hal;
5674 
5675 	if (!hal->ant_diversity)
5676 		return;
5677 
5678 	antdiv->get_stats = false;
5679 	antdiv->rssi_pre = 0;
5680 	rtw89_phy_antdiv_sts_reset(rtwdev);
5681 	rtw89_phy_antdiv_reg_init(rtwdev);
5682 }
5683 
/* Step the thermal-protection level up or down with hysteresis
 * (raise above threshold, lower only 2 degrees below it) and inform
 * firmware of the new TX duty level.
 */
static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 th_max = phystat->last_thermal_max;
	u8 lv = hal->thermal_prot_lv;

	/* Disabled when no threshold is set or the DM is masked off. */
	if (!hal->thermal_prot_th ||
	    (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT)))
		return;

	if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX)
		lv++;
	else if (th_max < hal->thermal_prot_th - 2 && lv > 0)
		lv--;
	else
		return;

	hal->thermal_prot_lv = lv;

	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv);

	rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv);
}
5708 
5709 static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
5710 {
5711 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
5712 	u8 th, th_max = 0;
5713 	int i;
5714 
5715 	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
5716 		th = rtw89_chip_get_thermal(rtwdev, i);
5717 		if (th)
5718 			ewma_thermal_add(&phystat->avg_thermal[i], th);
5719 
5720 		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
5721 			    "path(%d) thermal cur=%u avg=%ld", i, th,
5722 			    ewma_thermal_read(&phystat->avg_thermal[i]));
5723 
5724 		th_max = max(th_max, th);
5725 	}
5726 
5727 	phystat->last_thermal_max = th_max;
5728 }
5729 
/* Context passed through the station RSSI-update iterator: the device
 * plus a flag recording whether any station's RSSI moved enough that
 * BTC should be notified afterwards.
 */
struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	bool rssi_changed;
};
5734 
/* Per-link RSSI bookkeeping: update the owning BB's minimum-RSSI record
 * and detect significant RSSI movement since the last notification.
 */
static
void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
				       struct rtw89_phy_iter_rssi_data *rssi_data)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct rtw89_dev *rtwdev = rssi_data->rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	struct rtw89_bb_ctx *bb;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);
	bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
	ch_info = &bb->ch_info;

	/* remember the weakest station (and its mac_id) per BB context */
	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta_link->mac_id;
	}

	if (rtwsta_link->prev_rssi == 0) {
		/* first sample for this link: just latch it */
		rtwsta_link->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) >
		   (3 << RSSI_FACTOR)) {
		/* moved by more than 3 units (RSSI_FACTOR fixed-point)
		 * since the last latch: record it and flag the caller
		 * so BTC gets notified
		 */
		rtwsta_link->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}
5762 
5763 static void rtw89_phy_stat_rssi_update_iter(void *data,
5764 					    struct ieee80211_sta *sta)
5765 {
5766 	struct rtw89_phy_iter_rssi_data *rssi_data =
5767 					(struct rtw89_phy_iter_rssi_data *)data;
5768 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
5769 	struct rtw89_sta_link *rtwsta_link;
5770 	unsigned int link_id;
5771 
5772 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
5773 		__rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data);
5774 }
5775 
5776 static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
5777 {
5778 	struct rtw89_phy_iter_rssi_data rssi_data = {};
5779 	struct rtw89_bb_ctx *bb;
5780 
5781 	rssi_data.rtwdev = rtwdev;
5782 	rtw89_for_each_active_bb(rtwdev, bb)
5783 		bb->ch_info.rssi_min = U8_MAX;
5784 
5785 	ieee80211_iterate_stations_atomic(rtwdev->hw,
5786 					  rtw89_phy_stat_rssi_update_iter,
5787 					  &rssi_data);
5788 	if (rssi_data.rssi_changed)
5789 		rtw89_btc_ntfy_wl_sta(rtwdev);
5790 }
5791 
5792 static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
5793 {
5794 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
5795 	int i;
5796 
5797 	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
5798 		ewma_thermal_init(&phystat->avg_thermal[i]);
5799 
5800 	rtw89_phy_stat_thermal_update(rtwdev);
5801 
5802 	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
5803 	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
5804 
5805 	ewma_rssi_init(&phystat->bcn_rssi);
5806 
5807 	rtwdev->hal.thermal_prot_lv = 0;
5808 }
5809 
5810 void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
5811 {
5812 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
5813 
5814 	rtw89_phy_stat_thermal_update(rtwdev);
5815 	rtw89_phy_thermal_protect(rtwdev);
5816 	rtw89_phy_stat_rssi_update(rtwdev);
5817 
5818 	phystat->last_pkt_stat = phystat->cur_pkt_stat;
5819 	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
5820 }
5821 
5822 static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev,
5823 				   struct rtw89_bb_ctx *bb, u32 time_us)
5824 {
5825 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5826 
5827 	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
5828 }
5829 
5830 static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev,
5831 				   struct rtw89_bb_ctx *bb, u16 idx)
5832 {
5833 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5834 
5835 	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
5836 }
5837 
/* One-time CCX measurement-engine init for @bb: reset the software
 * bookkeeping and enable the engine with its default trigger and
 * EDCCA options.
 */
static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	/* software state: no manual override, no measurement in flight */
	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	/* enable CCX and set trigger option + initial trigger */
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->en_mask, 1, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
			      bb->phy_idx);
	/* monitor EDCCA on the primary 20 MHz segment */
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			      RTW89_CCX_EDCCA_BW20_0, bb->phy_idx);
}
5859 
5860 static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev,
5861 				    struct rtw89_bb_ctx *bb,
5862 				    u16 report, u16 score)
5863 {
5864 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5865 	u32 numer = 0;
5866 	u16 ret = 0;
5867 
5868 	numer = report * score + (env->ccx_period >> 1);
5869 	if (env->ccx_period)
5870 		ret = numer / env->ccx_period;
5871 
5872 	return ret >= score ? score - 1 : ret;
5873 }
5874 
5875 static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
5876 					    u16 time_ms, u32 *period,
5877 					    u32 *unit_idx)
5878 {
5879 	u32 idx;
5880 	u8 quotient;
5881 
5882 	if (time_ms >= CCX_MAX_PERIOD)
5883 		time_ms = CCX_MAX_PERIOD;
5884 
5885 	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;
5886 
5887 	if (quotient < 4)
5888 		idx = RTW89_CCX_4_US;
5889 	else if (quotient < 8)
5890 		idx = RTW89_CCX_8_US;
5891 	else if (quotient < 16)
5892 		idx = RTW89_CCX_16_US;
5893 	else
5894 		idx = RTW89_CCX_32_US;
5895 
5896 	*unit_idx = idx;
5897 	*period = (time_ms * MS_TO_4US_RATIO) >> idx;
5898 
5899 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
5900 		    "[Trigger Time] period:%d, unit_idx:%d\n",
5901 		    *period, *unit_idx);
5902 }
5903 
5904 static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev,
5905 					 struct rtw89_bb_ctx *bb)
5906 {
5907 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5908 
5909 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
5910 		    "lv:(%d)->(0)\n", env->ccx_rac_lv);
5911 
5912 	env->ccx_ongoing = false;
5913 	env->ccx_rac_lv = RTW89_RAC_RELEASE;
5914 	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
5915 }
5916 
/* Recompute the IFS-CLM histogram thresholds when the measurement
 * "application" changes. The four regions form a geometric series
 * starting at ifs_th0_us with ratio ifs_th_times (see inline comment).
 * Returns true when the thresholds were recomputed - the caller must
 * then write them to hardware - and false when nothing changed.
 */
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_bb_ctx *bb,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		/* all normal applications share the default threshold set */
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		/* debug mode: thresholds supplied by the caller */
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, bb,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}
5971 
/* Program the four IFS-CLM low/high threshold pairs (computed by
 * rtw89_phy_ifs_clm_th_update_check()) into the T1..T4 registers.
 * Each region has its own register/mask, so the writes cannot be
 * folded into a loop.
 */
static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	/* lower bounds of T1..T4 */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			      env->ifs_clm_th_l[0], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			      env->ifs_clm_th_l[1], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			      env->ifs_clm_th_l[2], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			      env->ifs_clm_th_l[3], bb->phy_idx);

	/* upper bounds of T1..T4 */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			      env->ifs_clm_th_h[0], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			      env->ifs_clm_th_h[1], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			      env->ifs_clm_th_h[2], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			      env->ifs_clm_th_h[3], bb->phy_idx);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
6003 
/* Per-BB NHM (noise histogram) init: clear the software state, then
 * set the NHM enable and "pwr method" bits in the config registers.
 */
static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->nhm_include_cca = false;
	env->nhm_mntr_time = 0;
	env->nhm_sum = 0;

	rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx);
	rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk,
				  bb->phy_idx);
}
6019 
6020 void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev)
6021 {
6022 	const struct rtw89_chip_info *chip = rtwdev->chip;
6023 	struct rtw89_bb_ctx *bb;
6024 
6025 	if (!chip->support_noise)
6026 		return;
6027 
6028 	rtw89_for_each_active_bb(rtwdev, bb)
6029 		__rtw89_phy_nhm_setting_init(rtwdev, bb);
6030 }
6031 
/* Per-BB IFS-CLM init: compute and program the default (INIT)
 * thresholds, then enable collection for all four IFS histogram
 * regions (T1..T4).
 */
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	/* INIT differs from the stored BACKGROUND app, so this forces a
	 * threshold recomputation on first call
	 */
	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);

	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true,
			      bb->phy_idx);
}
6058 
6059 static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
6060 				     struct rtw89_bb_ctx *bb,
6061 				     enum rtw89_env_racing_lv level)
6062 {
6063 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
6064 	int ret = 0;
6065 
6066 	if (level >= RTW89_RAC_MAX_NUM) {
6067 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6068 			    "[WARNING] Wrong LV=%d\n", level);
6069 		return -EINVAL;
6070 	}
6071 
6072 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6073 		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
6074 		    env->ccx_rac_lv, level);
6075 
6076 	if (env->ccx_ongoing) {
6077 		if (level <= env->ccx_rac_lv)
6078 			ret = -EINVAL;
6079 		else
6080 			env->ccx_ongoing = false;
6081 	}
6082 
6083 	if (ret == 0)
6084 		env->ccx_rac_lv = level;
6085 
6086 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
6087 		    !ret);
6088 
6089 	return ret;
6090 }
6091 
/* (Re)start a CCX measurement round. The counter-clear and trigger
 * bits are pulsed 0 -> 1 (and, when NHM is selected in @sel, its
 * enable bit is toggled off -> on) so the hardware latches a fresh
 * measurement. The write sequence follows the vendor flow; keep the
 * order as-is.
 */
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
				  struct rtw89_bb_ctx *bb, u8 sel)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	/* de-assert everything first ... */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
			      bb->phy_idx);
	if (sel & RTW89_PHY_ENV_MON_NHM)
		rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config,
					  ccx->nhm_en_mask, bb->phy_idx);

	/* ... then re-assert to generate the rising edge */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
			      bb->phy_idx);
	if (sel & RTW89_PHY_ENV_MON_NHM)
		rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config,
					  ccx->nhm_en_mask, bb->phy_idx);

	env->ccx_ongoing = true;
}
6117 
/* Derive human-usable utilization figures from the raw IFS-CLM
 * counters already read into @env: tx/EDCCA/FA/CCA ratios (percent and
 * permil of the measurement period) plus per-region average IFS and
 * CCA times in microseconds.
 */
static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		/* flag overflowed histograms, otherwise convert the
		 * hardware average from counter units to microseconds
		 */
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev, bb,
							env->ifs_clm_avg[i]);
		}

		/* average CCA time per histogram entry, rounded to nearest */
		res = rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}
6183 
/* Collapse the NHM histogram into a single weighted-average value.
 * Each report bucket is weighted by (roughly) the midpoint of its
 * threshold interval; the result is scaled back down by
 * RTW89_NHM_TH_FACTOR since the thresholds are stored pre-shifted.
 * Returns 0 when no samples were counted.
 */
static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	u8 nhm_weight[RTW89_NHM_RPT_NUM];
	u32 nhm_weighted_sum = 0;
	u8 weight_zero;
	u8 i;

	if (env->nhm_sum == 0)
		return 0;

	/* weight of the lowest bucket, clamped so it cannot go negative
	 * (NOTE(review): if nhm_th[0] < RTW89_NHM_WEIGHT_OFFSET the u16
	 * subtraction wraps before the clamp - appears impossible with
	 * the thresholds programmed by rtw89_phy_nhm_th_update())
	 */
	weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX);

	for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
		if (i == 0)
			nhm_weight[i] = weight_zero;
		else if (i == (RTW89_NHM_RPT_NUM - 1))
			nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET;
		else
			nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2;
	}

	/* workaround for chips whose last threshold is forced to
	 * RTW89_NHM_WA_TH: fold the top bucket into the bottom one and
	 * reuse the second-to-last weight for the last bucket
	 */
	if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B ||
	    rtwdev->chip->chip_id == RTL8852C) {
		if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) {
			nhm_weight[RTW89_NHM_RPT_NUM - 1] =
				env->nhm_th[RTW89_NHM_TH_NUM - 2] +
				RTW89_NHM_WEIGHT_OFFSET;
			nhm_weight[RTW89_NHM_RPT_NUM - 2] =
				nhm_weight[RTW89_NHM_RPT_NUM - 1];
		}

		env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1];
		env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0;
	}

	for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
		nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i];

	return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR;
}
6225 
/* Read back one NHM report for @bb: collect the per-bucket counters,
 * compute the weighted-average noise value and, when @ch_hw_value is
 * non-zero, file the result into the per-channel history and report
 * list. Silently returns when the hardware report is not ready.
 */
static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev,
				       struct rtw89_bb_ctx *bb, enum rtw89_band hw_band,
				       u16 ch_hw_value)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct ieee80211_supported_band *sband;
	const struct rtw89_reg_def *nhm_rpt;
	enum nl80211_band band;
	u32 sum = 0;
	u8 chan_idx;
	u8 nhm_pwr;
	u8 i;

	if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,  "[NHM] Get NHM report Fail\n");
		return;
	}

	/* harvest the histogram buckets from the chip-specific registers */
	for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
		nhm_rpt = &(*chip->nhm_report)[i];

		env->nhm_result[i] =
			rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr,
					     nhm_rpt->mask, bb->phy_idx);
		sum += env->nhm_result[i];
	}
	env->nhm_sum = sum;
	nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb);

	/* no channel given: measurement only, nothing to record */
	if (!ch_hw_value)
		return;

	band = rtw89_hw_to_nl80211_band(hw_band);
	sband = rtwdev->hw->wiphy->bands[band];
	if (!sband)
		return;

	/* locate the matching channel and store/queue the report */
	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
		struct ieee80211_channel *channel;
		struct rtw89_nhm_report *rpt;
		struct list_head *nhm_list;

		channel = &sband->channels[chan_idx];
		if (channel->hw_value != ch_hw_value)
			continue;

		rpt = &env->nhm_his[hw_band][chan_idx];
		nhm_list = &env->nhm_rpt_list;

		rpt->channel = channel;
		rpt->noise = nhm_pwr;

		/* only queue the entry once; later results just update it */
		if (list_empty(&rpt->list))
			list_add_tail(&rpt->list, nhm_list);

		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n");
}
6289 
6290 void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
6291 			      u16 ch_hw_value)
6292 {
6293 	const struct rtw89_chip_info *chip = rtwdev->chip;
6294 	struct rtw89_bb_ctx *bb;
6295 
6296 	if (!chip->support_noise)
6297 		return;
6298 
6299 	rtw89_for_each_active_bb(rtwdev, bb)
6300 		__rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value);
6301 }
6302 
/* Read a completed IFS-CLM measurement from hardware into @env and
 * derive the utilization figures. Returns false when the counters are
 * not marked done yet, true on success.
 */
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	if (rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
				 ccx->ifs_cnt_done_mask, bb->phy_idx) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	/* activity counters: tx, EDCCA, CCA and false-alarm breakdowns */
	env->ifs_clm_tx =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				     ccx->ifs_clm_tx_cnt_msk, bb->phy_idx);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				     ccx->ifs_clm_edcca_excl_cca_fa_mask, bb->phy_idx);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
				     ccx->ifs_clm_cckcca_excl_fa_mask, bb->phy_idx);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
				     ccx->ifs_clm_ofdmcca_excl_fa_mask, bb->phy_idx);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
				     ccx->ifs_clm_cck_fa_mask, bb->phy_idx);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
				     ccx->ifs_clm_ofdm_fa_mask, bb->phy_idx);

	/* per-region (T1..T4) IFS histogram counts */
	env->ifs_clm_his[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t1_his_mask, bb->phy_idx);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t2_his_mask, bb->phy_idx);

	env->ifs_clm_his[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr2,
				     ccx->ifs_t3_his_mask, bb->phy_idx);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr2,
				     ccx->ifs_t4_his_mask, bb->phy_idx);

	/* per-region average IFS, in counter units */
	env->ifs_clm_avg[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
				     ccx->ifs_t1_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
				     ccx->ifs_t2_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
				     ccx->ifs_t3_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
				     ccx->ifs_t4_avg_mask, bb->phy_idx);

	/* per-region accumulated CCA time, in counter units */
	env->ifs_clm_cca[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
				     ccx->ifs_t1_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
				     ccx->ifs_t2_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
				     ccx->ifs_t3_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
				     ccx->ifs_t4_cca_mask, bb->phy_idx);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
				     ccx->ifs_total_mask, bb->phy_idx);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev, bb);

	return true;
}
6403 
/* Program the NHM histogram thresholds from the 11k-derived table
 * (values stored left-shifted by RTW89_NHM_TH_FACTOR). 8852A/B/C need
 * the last threshold forced to a workaround value; see
 * rtw89_nhm_weighted_avg() for how that bucket is compensated.
 */
static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev,
				    struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	static const u8 nhm_th_11k[RTW89_NHM_RPT_NUM] = {
		18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60, 0
	};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *nhm_th;
	u8 i;

	for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
		env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR;

	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
	    chip->chip_id == RTL8852C)
		env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH;

	/* write each threshold through its chip-specific register/mask */
	for (i = 0; i < RTW89_NHM_TH_NUM; i++) {
		nhm_th = &(*chip->nhm_th)[i];

		rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask,
				      env->nhm_th[i], bb->phy_idx);
	}
}
6429 
6430 static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev,
6431 			     struct rtw89_bb_ctx *bb,
6432 			     struct rtw89_ccx_para_info *para)
6433 {
6434 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
6435 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
6436 	const struct rtw89_ccx_regs *ccx = phy->ccx;
6437 	u32 unit_idx = 0;
6438 	u32 period = 0;
6439 
6440 	if (para->mntr_time == 0) {
6441 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6442 			    "[NHM] MNTR_TIME is 0\n");
6443 		return -EINVAL;
6444 	}
6445 
6446 	if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
6447 		return -EINVAL;
6448 
6449 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6450 		    "[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n",
6451 		    para->nhm_incld_cca, para->mntr_time);
6452 
6453 	if (para->mntr_time != env->nhm_mntr_time) {
6454 		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
6455 						&period, &unit_idx);
6456 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6457 				      ccx->nhm_period_mask, period, bb->phy_idx);
6458 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6459 				      ccx->nhm_unit_mask, period, bb->phy_idx);
6460 
6461 		env->nhm_mntr_time = para->mntr_time;
6462 		env->ccx_period = period;
6463 		env->ccx_unit_idx = unit_idx;
6464 	}
6465 
6466 	if (para->nhm_incld_cca != env->nhm_include_cca) {
6467 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6468 				      ccx->nhm_include_cca_mask, para->nhm_incld_cca,
6469 				      bb->phy_idx);
6470 
6471 		env->nhm_include_cca = para->nhm_incld_cca;
6472 	}
6473 
6474 	rtw89_phy_nhm_th_update(rtwdev, bb);
6475 
6476 	return 0;
6477 }
6478 
6479 static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
6480 {
6481 	struct rtw89_ccx_para_info para = {
6482 		.mntr_time = RTW89_NHM_MNTR_TIME,
6483 		.rac_lv = RTW89_RAC_LV_1,
6484 		.nhm_incld_cca = true,
6485 	};
6486 
6487 	rtw89_phy_ccx_racing_release(rtwdev, bb);
6488 
6489 	rtw89_phy_nhm_set(rtwdev, bb, &para);
6490 	rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM);
6491 }
6492 
6493 void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev)
6494 {
6495 	const struct rtw89_chip_info *chip = rtwdev->chip;
6496 	struct rtw89_bb_ctx *bb;
6497 
6498 	if (!chip->support_noise)
6499 		return;
6500 
6501 	rtw89_for_each_active_bb(rtwdev, bb)
6502 		__rtw89_phy_nhm_trigger(rtwdev, bb);
6503 }
6504 
/* Configure an IFS-CLM measurement: claim the CCX engine at
 * @para->rac_lv, program the monitor period/unit when it changed, and
 * refresh the thresholds when the application changed.
 *
 * Returns 0 on success, -EINVAL when the monitor time is zero or the
 * racing claim fails.
 */
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_bb_ctx *bb,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
		return -EINVAL;

	/* only touch the hardware when the monitor time actually changed */
	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
				      ccx->ifs_clm_period_mask, period, bb->phy_idx);
		rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
				      ccx->ifs_clm_cnt_unit_mask,
				      unit_idx, bb->phy_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
	}

	return 0;
}
6549 
/* Watchdog body for one BB: harvest the finished IFS-CLM round (unless
 * a manual CCX session owns the engine), then re-arm a background
 * measurement for the next watchdog period.
 */
static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	struct rtw89_ccx_para_info para = {};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "BB-%d env_monitor track\n", bb->phy_idx);

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev, bb))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	/* re-arm: release any claim, then set up a 1900 ms background run */
	rtw89_phy_ccx_racing_release(rtwdev, bb);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev, bb, chk_result);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}
6585 
/* Run the environment-monitor watchdog on every active BB. */
void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_env_monitor_track(rtwdev, bb);
}
6593 
6594 static bool rtw89_physts_ie_page_valid(struct rtw89_dev *rtwdev,
6595 				       enum rtw89_phy_status_bitmap *ie_page)
6596 {
6597 	const struct rtw89_chip_info *chip = rtwdev->chip;
6598 
6599 	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
6600 	    *ie_page == RTW89_RSVD_9)
6601 		return false;
6602 	else if (*ie_page > RTW89_RSVD_9 && *ie_page < RTW89_EHT_PKT)
6603 		*ie_page -= 1;
6604 
6605 	if (*ie_page == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX)
6606 		return false;
6607 
6608 	return true;
6609 }
6610 
6611 static u32 rtw89_phy_get_ie_bitmap_addr(struct rtw89_dev *rtwdev,
6612 					enum rtw89_phy_status_bitmap ie_page)
6613 {
6614 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
6615 	static const u8 ie_page_shift = 2;
6616 
6617 	if (ie_page == RTW89_EHT_PKT)
6618 		return phy->physt_bmp_eht;
6619 
6620 	return phy->physt_bmp_start + (ie_page << ie_page_shift);
6621 }
6622 
6623 static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
6624 				      enum rtw89_phy_status_bitmap ie_page,
6625 				      enum rtw89_phy_idx phy_idx)
6626 {
6627 	u32 addr;
6628 
6629 	if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
6630 		return 0;
6631 
6632 	addr = rtw89_phy_get_ie_bitmap_addr(rtwdev, ie_page);
6633 
6634 	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
6635 }
6636 
6637 static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
6638 				       enum rtw89_phy_status_bitmap ie_page,
6639 				       u32 val, enum rtw89_phy_idx phy_idx)
6640 {
6641 	const struct rtw89_chip_info *chip = rtwdev->chip;
6642 	u32 addr;
6643 
6644 	if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
6645 		return;
6646 
6647 	if (chip->chip_id == RTL8852A)
6648 		val &= B_PHY_STS_BITMAP_MSK_52A;
6649 
6650 	addr = rtw89_phy_get_ie_bitmap_addr(rtwdev, ie_page);
6651 	rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
6652 }
6653 
/* Enable or disable PHY-status reports for failed/broken receptions.
 *
 * The hardware bits are "disable trigger on fail/break", so enabling the
 * reports means clearing both bits, and disabling means setting them.
 */
static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	} else {
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	}
}
6673 
6674 static void rtw89_physts_enable_hdr_2(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
6675 {
6676 	const struct rtw89_chip_info *chip = rtwdev->chip;
6677 
6678 	if (chip->chip_gen == RTW89_CHIP_AX || chip->chip_id == RTL8922A)
6679 		return;
6680 
6681 	rtw89_phy_write32_idx_set(rtwdev, R_STS_HDR2_PARSING_BE4,
6682 				  B_STS_HDR2_PARSING_BE4, phy_idx);
6683 }
6684 
/* Initialize PHY-status parsing for one PHY: disable fail/break reports,
 * enable hdr_2 where supported, then patch the per-page IE bitmaps so each
 * packet class carries the IEs the driver needs (DL-MU info for MU pages,
 * common OFDM info for trigger-based and CCK pages) and drops the
 * extension-path IEs it does not consume.
 */
static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);

	/* enable hdr_2 for 8922D (PHYSTS_BE_GEN2 above) */
	rtw89_physts_enable_hdr_2(rtwdev, phy_idx);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		/* skip pages that do not exist on this chip generation */
		if (i == RTW89_RSVD_9 ||
		    (i == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX))
			continue;

		val = rtw89_physts_get_ie_bitmap(rtwdev, i, phy_idx);
		if (i == RTW89_HE_MU || i == RTW89_VHT_MU) {
			val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF);
		} else if (i == RTW89_TRIG_BASE_PPDU) {
			val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
			       BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
		} else if (i >= RTW89_CCK_PKT) {
			/* the driver does not parse extension-path IEs */
			val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
					 RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));

			if (i == RTW89_CCK_PKT)
				val |= BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
			else if (i >= RTW89_HT_PKT)
				val |= BIT(RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0);
		}

		rtw89_physts_set_ie_bitmap(rtwdev, i, val, phy_idx);
	}
}
6721 
6722 static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
6723 {
6724 	__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_0);
6725 	if (rtwdev->dbcc_en)
6726 		__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
6727 }
6728 
/* Read one DIG gain table (LNA or TIA, 2 GHz or 5 GHz flavor) from the
 * baseband registers into the per-BB dig state.
 *
 * Each register field holds a signed 4-bit offset (after DIG_GAIN_SHIFT)
 * relative to a running base that advances by DIG_GAIN per entry.
 */
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	struct rtw89_dig_info *dig = &bb->dig;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	/* pick destination array, base gain and register table per type */
	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr,
					   cfg->table[i].mask, bb->phy_idx);
		tmp >>= DIG_GAIN_SHIFT;
		/* signed 4-bit offset on top of the per-entry base */
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}
6781 
6782 static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev,
6783 					   struct rtw89_bb_ctx *bb)
6784 {
6785 	struct rtw89_dig_info *dig = &bb->dig;
6786 	u32 tmp;
6787 	u8 i;
6788 
6789 	if (!rtwdev->hal.support_igi)
6790 		return;
6791 
6792 	tmp = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PKPW,
6793 				   B_PATH0_IB_PKPW_MSK, bb->phy_idx);
6794 	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
6795 	dig->ib_pbk = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PBK,
6796 					   B_PATH0_IB_PBK_MSK, bb->phy_idx);
6797 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
6798 		    dig->ib_pkpwr, dig->ib_pbk);
6799 
6800 	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
6801 		rtw89_phy_dig_read_gain_table(rtwdev, bb, i);
6802 }
6803 
/* RSSI value assumed while no station is associated */
static const u8 rssi_nolink = 22;
/* RSSI thresholds used to pick LNA/TIA gain indexes */
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
/* false-alarm thresholds per noisy level: 2G / 5G when linked, else nolink */
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
6809 
6810 static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev,
6811 					   struct rtw89_bb_ctx *bb)
6812 {
6813 	struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
6814 	struct rtw89_dig_info *dig = &bb->dig;
6815 	bool is_linked = rtwdev->total_sta_assoc > 0;
6816 
6817 	if (is_linked) {
6818 		dig->igi_rssi = ch_info->rssi_min >> 1;
6819 	} else {
6820 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
6821 		dig->igi_rssi = rssi_nolink;
6822 	}
6823 }
6824 
6825 static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev,
6826 				      struct rtw89_bb_ctx *bb)
6827 {
6828 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
6829 	struct rtw89_dig_info *dig = &bb->dig;
6830 	bool is_linked = rtwdev->total_sta_assoc > 0;
6831 	const u16 *fa_th_src = NULL;
6832 
6833 	switch (chan->band_type) {
6834 	case RTW89_BAND_2G:
6835 		dig->lna_gain = dig->lna_gain_g;
6836 		dig->tia_gain = dig->tia_gain_g;
6837 		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
6838 		dig->force_gaincode_idx_en = false;
6839 		dig->dyn_pd_th_en = true;
6840 		break;
6841 	case RTW89_BAND_5G:
6842 	default:
6843 		dig->lna_gain = dig->lna_gain_a;
6844 		dig->tia_gain = dig->tia_gain_a;
6845 		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
6846 		dig->force_gaincode_idx_en = true;
6847 		dig->dyn_pd_th_en = true;
6848 		break;
6849 	}
6850 	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
6851 	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
6852 }
6853 
/* Defaults for the dynamic IGI / packet-detection algorithm below */
static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
/* zero-initialized; the effective ceiling is recomputed from igi_rssi */
static const u8 dynamic_pd_threshold_max;
6857 
/* Reset the per-BB DIG state to its maximum-gain defaults. */
static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev,
				     struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;

	/* both the tracked and the forced gaincode start at maximum gain */
	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	/* force the "first connected" path on the next dig track run */
	dig->is_linked_pre = false;
}
6876 
/* Initialize DIG for one baseband: load gain tables, then reset state. */
static void __rtw89_phy_dig_init(struct rtw89_dev *rtwdev,
				 struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig_init\n", bb->phy_idx);

	rtw89_phy_dig_update_gain_para(rtwdev, bb);
	rtw89_phy_dig_reset(rtwdev, bb);
}
6885 
6886 static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
6887 {
6888 	struct rtw89_bb_ctx *bb;
6889 
6890 	rtw89_for_each_capab_bb(rtwdev, bb)
6891 		__rtw89_phy_dig_init(rtwdev, bb);
6892 }
6893 
6894 static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev,
6895 					struct rtw89_bb_ctx *bb, u8 rssi)
6896 {
6897 	struct rtw89_dig_info *dig = &bb->dig;
6898 	u8 lna_idx;
6899 
6900 	if (rssi < dig->igi_rssi_th[0])
6901 		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
6902 	else if (rssi < dig->igi_rssi_th[1])
6903 		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
6904 	else if (rssi < dig->igi_rssi_th[2])
6905 		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
6906 	else if (rssi < dig->igi_rssi_th[3])
6907 		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
6908 	else if (rssi < dig->igi_rssi_th[4])
6909 		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
6910 	else
6911 		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;
6912 
6913 	return lna_idx;
6914 }
6915 
6916 static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev,
6917 					struct rtw89_bb_ctx *bb, u8 rssi)
6918 {
6919 	struct rtw89_dig_info *dig = &bb->dig;
6920 	u8 tia_idx;
6921 
6922 	if (rssi < dig->igi_rssi_th[0])
6923 		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
6924 	else
6925 		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;
6926 
6927 	return tia_idx;
6928 }
6929 
6930 #define IB_PBK_BASE 110
6931 #define WB_RSSI_BASE 10
6932 static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev,
6933 					struct rtw89_bb_ctx *bb, u8 rssi,
6934 					struct rtw89_agc_gaincode_set *set)
6935 {
6936 	struct rtw89_dig_info *dig = &bb->dig;
6937 	s8 lna_gain = dig->lna_gain[set->lna_idx];
6938 	s8 tia_gain = dig->tia_gain[set->tia_idx];
6939 	s32 wb_rssi = rssi + lna_gain + tia_gain;
6940 	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
6941 	u8 rxb_idx;
6942 
6943 	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
6944 	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);
6945 
6946 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
6947 		    wb_rssi, rxb_idx_tmp);
6948 
6949 	return rxb_idx;
6950 }
6951 
/* Fill a full AGC gaincode set (LNA, TIA, RXB) for the given RSSI.
 * The RXB index depends on the LNA/TIA choices, so those are derived first.
 */
static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, bb, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, bb, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, bb, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}
6964 
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
/* Update dig->fa_rssi_ofst from the current false-alarm environment.
 *
 * The combined CCK+OFDM false-alarm ratio (permil) is bucketed into a
 * noisy level via dig->fa_th; each level above quiet adds IGI_OFFSET_MUL
 * to the running offset, which is capped at IGI_OFFSET_MAX. In a quiet
 * environment a small residual offset decays back to zero.
 */
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev,
					    struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	/* quiet environment: let a small leftover offset decay to zero */
	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}
7007 
7008 static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev,
7009 				      struct rtw89_bb_ctx *bb, u8 lna_idx)
7010 {
7011 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
7012 
7013 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_lna_init.addr,
7014 			      dig_regs->p0_lna_init.mask, lna_idx, bb->phy_idx);
7015 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_lna_init.addr,
7016 			      dig_regs->p1_lna_init.mask, lna_idx, bb->phy_idx);
7017 }
7018 
7019 static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev,
7020 				      struct rtw89_bb_ctx *bb, u8 tia_idx)
7021 {
7022 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
7023 
7024 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_tia_init.addr,
7025 			      dig_regs->p0_tia_init.mask, tia_idx, bb->phy_idx);
7026 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_tia_init.addr,
7027 			      dig_regs->p1_tia_init.mask, tia_idx, bb->phy_idx);
7028 }
7029 
7030 static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev,
7031 				      struct rtw89_bb_ctx *bb, u8 rxb_idx)
7032 {
7033 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
7034 
7035 	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_rxb_init.addr,
7036 			      dig_regs->p0_rxb_init.mask, rxb_idx, bb->phy_idx);
7037 	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_rxb_init.addr,
7038 			      dig_regs->p1_rxb_init.mask, rxb_idx, bb->phy_idx);
7039 }
7040 
/* Apply a complete AGC gaincode set (LNA, TIA, RXB) to hardware.
 * No-op on chips without IGI support.
 */
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     struct rtw89_bb_ctx *bb,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, bb, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, bb, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, bb, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}
7055 
/* Let the sub-band digital AGC follow the packet AGC on all four
 * path/segment combinations. Only applicable to AX-generation chips.
 */
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   struct rtw89_bb_ctx *bb,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
		return;

	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			      dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			      dig_regs->p0_s20_pagcugc_en.mask, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			      dig_regs->p1_p20_pagcugc_en.mask, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			      dig_regs->p1_s20_pagcugc_en.mask, enable, bb->phy_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
7076 
7077 static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev,
7078 				     struct rtw89_bb_ctx *bb)
7079 {
7080 	struct rtw89_dig_info *dig = &bb->dig;
7081 
7082 	if (!rtwdev->hal.support_igi)
7083 		return;
7084 
7085 	if (dig->force_gaincode_idx_en) {
7086 		rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
7087 		rtw89_debug(rtwdev, RTW89_DBG_DIG,
7088 			    "Force gaincode index enabled.\n");
7089 	} else {
7090 		rtw89_phy_dig_gaincode_by_rssi(rtwdev, bb, dig->igi_fa_rssi,
7091 					       &dig->cur_gaincode);
7092 		rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->cur_gaincode);
7093 	}
7094 }
7095 
7096 static u8 rtw89_phy_dig_cal_under_region(struct rtw89_dev *rtwdev,
7097 					 struct rtw89_bb_ctx *bb,
7098 					 const struct rtw89_chan *chan)
7099 {
7100 	enum rtw89_bandwidth cbw = chan->band_width;
7101 	struct rtw89_dig_info *dig = &bb->dig;
7102 	u8 under_region = dig->pd_low_th_ofst;
7103 
7104 	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
7105 		under_region += PD_TH_SB_FLTR_CMP_VAL;
7106 
7107 	switch (cbw) {
7108 	case RTW89_CHANNEL_WIDTH_40:
7109 		under_region += PD_TH_BW40_CMP_VAL;
7110 		break;
7111 	case RTW89_CHANNEL_WIDTH_80:
7112 		under_region += PD_TH_BW80_CMP_VAL;
7113 		break;
7114 	case RTW89_CHANNEL_WIDTH_160:
7115 		under_region += PD_TH_BW160_CMP_VAL;
7116 		break;
7117 	case RTW89_CHANNEL_WIDTH_20:
7118 		fallthrough;
7119 	default:
7120 		under_region += PD_TH_BW20_CMP_VAL;
7121 		break;
7122 	}
7123 
7124 	return under_region;
7125 }
7126 
/* Compute the OFDM packet-detection lower-bound register value.
 *
 * The effective CCA threshold is the FA-adjusted RSSI capped by igi_rssi
 * and clamped into [PD_TH_MIN_RSSI, PD_TH_MAX_RSSI] shifted by the
 * channel's backoff (under_region). Returns 0 when dynamic PD is off.
 * Also refreshes dig->dyn_pd_th_max as a side effect.
 */
static u32 __rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
				     struct rtw89_bb_ctx *bb,
				     u8 rssi, bool enable,
				     const struct rtw89_chan *chan)
{
	struct rtw89_dig_info *dig = &bb->dig;
	u8 ofdm_cca_th, under_region;
	u8 final_rssi;
	u32 pd_val;

	under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		/* register value is in 2-unit steps above the minimum */
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		pd_val = 0;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	return pd_val;
}
7157 
7158 static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
7159 				    struct rtw89_bb_ctx *bb,
7160 				    u8 rssi, bool enable)
7161 {
7162 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
7163 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
7164 	struct rtw89_dig_info *dig = &bb->dig;
7165 	u8 final_rssi, under_region = dig->pd_low_th_ofst;
7166 	s8 cck_cca_th;
7167 	u32 pd_val;
7168 
7169 	pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi, enable, chan);
7170 	dig->bak_dig = pd_val;
7171 
7172 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
7173 			      dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
7174 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
7175 			      dig_regs->pd_spatial_reuse_en, enable, bb->phy_idx);
7176 
7177 	if (!rtwdev->hal.support_cckpd)
7178 		return;
7179 
7180 	final_rssi = min_t(u8, rssi, dig->igi_rssi);
7181 	under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
7182 	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
7183 	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
7184 
7185 	rtw89_debug(rtwdev, RTW89_DBG_DIG,
7186 		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
7187 		    final_rssi, cck_cca_th, under_region, pd_val);
7188 
7189 	rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_reg,
7190 			      dig_regs->bmode_cca_rssi_limit_en, enable, bb->phy_idx);
7191 	rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
7192 			      dig_regs->bmode_rssi_nocca_low_th_mask, pd_val, bb->phy_idx);
7193 }
7194 
/* Restore DIG to its initial state: default parameters, maximum-gain
 * gaincode, dynamic PD disabled and SDAGC decoupled from the packet AGC.
 */
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev, bb);
	rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
	rtw89_phy_dig_update_para(rtwdev, bb);
}
7206 
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
/* Recompute the false-alarm-adjusted RSSI (igi_fa_rssi) and its dynamic
 * clamp window [dyn_igi_min, dyn_igi_max] from the current igi_rssi and
 * the environment-driven offset.
 */
static
void rtw89_phy_cal_igi_fa_rssi(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	u8 igi_min;

	/* refresh dig->fa_rssi_ofst from the false-alarm statistics */
	rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);

	igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
	dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
	dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);

	if (dig->dyn_igi_max >= dig->dyn_igi_min) {
		dig->igi_fa_rssi += dig->fa_rssi_ofst;
		dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
					 dig->dyn_igi_max);
	} else {
		/* degenerate window: pin to the ceiling */
		dig->igi_fa_rssi = dig->dyn_igi_max;
	}
}
7229 
/* Accumulator for rtw89_phy_set_mcc_dig_iter(): tracks the minimum average
 * RSSI among stations bound to @rtwvif_link.
 */
struct rtw89_phy_iter_mcc_dig {
	struct rtw89_vif_link *rtwvif_link;	/* link whose stations are scanned */
	bool has_sta;				/* true once a matching station is seen */
	u8 rssi_min;				/* smallest average RSSI found so far */
};
7235 
7236 static void rtw89_phy_set_mcc_dig(struct rtw89_dev *rtwdev,
7237 				  struct rtw89_vif_link *rtwvif_link,
7238 				  struct rtw89_bb_ctx *bb,
7239 				  u8 rssi_min, u8 mcc_role_idx,
7240 				  bool is_linked)
7241 {
7242 	struct rtw89_dig_info *dig = &bb->dig;
7243 	const struct rtw89_chan *chan;
7244 	u8 pd_val;
7245 
7246 	if (is_linked) {
7247 		dig->igi_rssi = rssi_min >> 1;
7248 		dig->igi_fa_rssi = dig->igi_rssi;
7249 	} else {
7250 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
7251 		dig->igi_rssi = rssi_nolink;
7252 		dig->igi_fa_rssi = dig->igi_rssi;
7253 	}
7254 
7255 	chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
7256 	rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);
7257 	pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi,
7258 					   is_linked, chan);
7259 	rtw89_fw_h2c_mcc_dig(rtwdev, rtwvif_link->chanctx_idx,
7260 			     mcc_role_idx, pd_val, true);
7261 
7262 	rtw89_debug(rtwdev, RTW89_DBG_DIG,
7263 		    "MCC chanctx_idx %d chan %d rssi %d pd_val %d",
7264 		    rtwvif_link->chanctx_idx, chan->primary_channel,
7265 		    dig->igi_rssi, pd_val);
7266 }
7267 
7268 static void rtw89_phy_set_mcc_dig_iter(void *data, struct ieee80211_sta *sta)
7269 {
7270 	struct rtw89_phy_iter_mcc_dig *mcc_dig = (struct rtw89_phy_iter_mcc_dig *)data;
7271 	unsigned int link_id = mcc_dig->rtwvif_link->link_id;
7272 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
7273 	struct rtw89_sta_link *rtwsta_link;
7274 
7275 	if (rtwsta->rtwvif != mcc_dig->rtwvif_link->rtwvif)
7276 		return;
7277 
7278 	rtwsta_link = rtwsta->links[link_id];
7279 	if (!rtwsta_link)
7280 		return;
7281 
7282 	mcc_dig->has_sta = true;
7283 	if (ewma_rssi_read(&rtwsta_link->avg_rssi) < mcc_dig->rssi_min)
7284 		mcc_dig->rssi_min = ewma_rssi_read(&rtwsta_link->avg_rssi);
7285 }
7286 
7287 static void rtw89_phy_dig_mcc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7288 {
7289 	struct rtw89_phy_iter_mcc_dig mcc_dig;
7290 	struct rtw89_vif_link *rtwvif_link;
7291 	struct rtw89_mcc_links_info info;
7292 	int i;
7293 
7294 	rtw89_mcc_get_links(rtwdev, &info);
7295 	for (i = 0; i < ARRAY_SIZE(info.links); i++) {
7296 		rtwvif_link = info.links[i];
7297 		if (!rtwvif_link)
7298 			continue;
7299 
7300 		memset(&mcc_dig, 0, sizeof(mcc_dig));
7301 		mcc_dig.rtwvif_link = rtwvif_link;
7302 		mcc_dig.has_sta = false;
7303 		mcc_dig.rssi_min = U8_MAX;
7304 		ieee80211_iterate_stations_atomic(rtwdev->hw,
7305 						  rtw89_phy_set_mcc_dig_iter,
7306 						  &mcc_dig);
7307 
7308 		rtw89_phy_set_mcc_dig(rtwdev, rtwvif_link, bb,
7309 				      mcc_dig.rssi_min, i, mcc_dig.has_sta);
7310 	}
7311 }
7312 
7313 static void rtw89_phy_dig_ctrl(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb,
7314 			       bool pause_dig, bool restore)
7315 {
7316 	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
7317 	struct rtw89_dig_info *dig = &bb->dig;
7318 	bool en_dig;
7319 	u32 pd_val;
7320 
7321 	if (dig->pause_dig == pause_dig)
7322 		return;
7323 
7324 	if (pause_dig) {
7325 		en_dig = false;
7326 		pd_val = 0;
7327 	} else {
7328 		en_dig = rtwdev->total_sta_assoc > 0;
7329 		pd_val = restore ? dig->bak_dig : 0;
7330 	}
7331 
7332 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s <%s> PD_low=%d", __func__,
7333 		    pause_dig ? "suspend" : "resume", pd_val);
7334 
7335 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
7336 			      dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
7337 	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
7338 			      dig_regs->pd_spatial_reuse_en, en_dig, bb->phy_idx);
7339 
7340 	dig->pause_dig = pause_dig;
7341 }
7342 
7343 void rtw89_phy_dig_suspend(struct rtw89_dev *rtwdev)
7344 {
7345 	struct rtw89_bb_ctx *bb;
7346 
7347 	rtw89_for_each_active_bb(rtwdev, bb)
7348 		rtw89_phy_dig_ctrl(rtwdev, bb, true, false);
7349 }
7350 
7351 void rtw89_phy_dig_resume(struct rtw89_dev *rtwdev, bool restore)
7352 {
7353 	struct rtw89_bb_ctx *bb;
7354 
7355 	rtw89_for_each_active_bb(rtwdev, bb)
7356 		rtw89_phy_dig_ctrl(rtwdev, bb, false, restore);
7357 }
7358 
/* One DIG tracking pass for a single baseband.
 *
 * Refreshes the RSSI baseline, defers to the MCC path when in MCC mode,
 * reloads band parameters on link-state transitions, recomputes the
 * FA-adjusted RSSI, and programs the IGI gaincode and dynamic PD
 * thresholds accordingly.
 */
static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	enum rtw89_entity_mode mode;

	if (unlikely(dig->bypass_dig)) {
		/* one-shot skip requested elsewhere; consume it */
		dig->bypass_dig = false;
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig track\n", bb->phy_idx);

	rtw89_phy_dig_update_rssi_info(rtwdev, bb);

	mode = rtw89_get_entity_mode(rtwdev);
	if (mode == RTW89_ENTITY_MODE_MCC) {
		rtw89_phy_dig_mcc(rtwdev, bb);
		return;
	}

	if (unlikely(dig->pause_dig))
		return;

	/* reload band parameters on any link-state transition */
	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev, bb);
		dig->igi_fa_rssi = dig->igi_rssi;
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev, bb);
		dig->igi_fa_rssi = dig->igi_rssi;
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev, bb);

	rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	/* couple SDAGC to the packet AGC only above the dynamic PD ceiling */
	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
}
7410 
7411 void rtw89_phy_dig(struct rtw89_dev *rtwdev)
7412 {
7413 	struct rtw89_bb_ctx *bb;
7414 
7415 	rtw89_for_each_active_bb(rtwdev, bb)
7416 		__rtw89_phy_dig(rtwdev, bb);
7417 }
7418 
7419 static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
7420 					     struct rtw89_sta_link *rtwsta_link)
7421 {
7422 	struct rtw89_hal *hal = &rtwdev->hal;
7423 	u8 rssi_a, rssi_b;
7424 	u32 candidate;
7425 
7426 	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
7427 	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);
7428 
7429 	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
7430 		candidate = RF_A;
7431 	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
7432 		candidate = RF_B;
7433 	else
7434 		return;
7435 
7436 	if (hal->antenna_tx == candidate)
7437 		return;
7438 
7439 	hal->antenna_tx = candidate;
7440 	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);
7441 
7442 	if (hal->antenna_tx == RF_A) {
7443 		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
7444 		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
7445 	} else if (hal->antenna_tx == RF_B) {
7446 		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
7447 		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
7448 	}
7449 }
7450 
/* Station iterator for TX path diversity.
 *
 * Processes at most one suitable link (first STA-role, non-TDLS,
 * non-MLD); *data is a "done" flag shared across iterations so only
 * the first match runs the diversity decision.
 */
static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;
	bool *done = data;

	/* path diversity drives a single shared TX path; MLD is unsupported */
	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
		return;

	if (sta->tdls)
		return;

	if (*done)
		return;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;
		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
			continue;

		*done = true;
		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
		return;
	}
}
7481 
7482 void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
7483 {
7484 	struct rtw89_hal *hal = &rtwdev->hal;
7485 	bool done = false;
7486 
7487 	if (!hal->tx_path_diversity)
7488 		return;
7489 
7490 	ieee80211_iterate_stations_atomic(rtwdev->hw,
7491 					  rtw89_phy_tx_path_div_sta_iter,
7492 					  &done);
7493 }
7494 
#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

/* Program the antenna-diversity selection registers so the antenna
 * matching hal->antenna_tx becomes the default for TX/RX, with the other
 * one kept as the RX alternative. No-op when diversity is off or no TX
 * antenna has been chosen yet.
 */
static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}
7523 
7524 static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
7525 {
7526 	struct rtw89_hal *hal = &rtwdev->hal;
7527 
7528 	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
7529 	hal->antenna_tx = hal->antenna_rx;
7530 }
7531 
7532 static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
7533 {
7534 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7535 	struct rtw89_hal *hal = &rtwdev->hal;
7536 	bool no_change = false;
7537 	u8 main_rssi, aux_rssi;
7538 	u8 main_evm, aux_evm;
7539 	u32 candidate;
7540 
7541 	antdiv->get_stats = false;
7542 	antdiv->training_count = 0;
7543 
7544 	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
7545 	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
7546 	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
7547 	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);
7548 
7549 	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
7550 		candidate = RF_A;
7551 	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
7552 		candidate = RF_B;
7553 	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
7554 		candidate = RF_A;
7555 	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
7556 		candidate = RF_B;
7557 	else
7558 		no_change = true;
7559 
7560 	if (no_change) {
7561 		/* swap back from training antenna to original */
7562 		rtw89_phy_swap_hal_antenna(rtwdev);
7563 		return;
7564 	}
7565 
7566 	hal->antenna_tx = candidate;
7567 	hal->antenna_rx = candidate;
7568 }
7569 
7570 static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
7571 {
7572 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7573 	u64 state_period;
7574 
7575 	if (antdiv->training_count % 2 == 0) {
7576 		if (antdiv->training_count == 0)
7577 			rtw89_phy_antdiv_sts_reset(rtwdev);
7578 
7579 		antdiv->get_stats = true;
7580 		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
7581 	} else {
7582 		antdiv->get_stats = false;
7583 		state_period = msecs_to_jiffies(ANTDIV_DELAY);
7584 
7585 		rtw89_phy_swap_hal_antenna(rtwdev);
7586 		rtw89_phy_antdiv_set_ant(rtwdev);
7587 	}
7588 
7589 	antdiv->training_count++;
7590 	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work,
7591 				 state_period);
7592 }
7593 
7594 void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work)
7595 {
7596 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
7597 						antdiv_work.work);
7598 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7599 
7600 	lockdep_assert_wiphy(wiphy);
7601 
7602 	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
7603 		rtw89_phy_antdiv_training_state(rtwdev);
7604 	} else {
7605 		rtw89_phy_antdiv_decision_state(rtwdev);
7606 		rtw89_phy_antdiv_set_ant(rtwdev);
7607 	}
7608 }
7609 
7610 void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
7611 {
7612 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7613 	struct rtw89_hal *hal = &rtwdev->hal;
7614 	u8 rssi, rssi_pre;
7615 
7616 	if (!hal->ant_diversity || hal->ant_diversity_fixed)
7617 		return;
7618 
7619 	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
7620 	rssi_pre = antdiv->rssi_pre;
7621 	antdiv->rssi_pre = rssi;
7622 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
7623 
7624 	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
7625 		return;
7626 
7627 	antdiv->training_count = 0;
7628 	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, 0);
7629 }
7630 
/* Initialize the environment monitor (CCX top settings and IFS CLM
 * settings) for one BB context.
 */
static void __rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "BB-%d env_monitor init\n", bb->phy_idx);

	rtw89_phy_ccx_top_setting_init(rtwdev, bb);
	rtw89_phy_ifs_clm_setting_init(rtwdev, bb);
}
7640 
/* Initialize the environment monitor on every BB the chip supports. */
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_env_monitor_init(rtwdev, bb);
}
7648 
/* Initialize EDCCA state for one BB context: drop any backed-up
 * thresholds and program the TX-collision T2R setting.
 */
static void __rtw89_phy_edcca_init(struct rtw89_dev *rtwdev,
				   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca init\n", bb->phy_idx);

	/* forget thresholds saved by a previous scan/backup cycle */
	memset(edcca_bak, 0, sizeof(*edcca_bak));

	/* NOTE(review): the register sequence below (including the 0->1
	 * toggles of B_SEGSND_EN and B_DFS_FFT_EN) looks like a chip-specific
	 * workaround for the 8922A CAV cut; ordering is presumably
	 * significant - do not reorder without vendor confirmation.
	 */
	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_idx(rtwdev, edcca_regs->tx_collision_t2r_st,
			      edcca_regs->tx_collision_t2r_st_mask, 0x29, bb->phy_idx);
}
7674 
/* Initialize EDCCA on every BB the chip supports. */
static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_edcca_init(rtwdev, bb);
}
7682 
/* Full dynamic-mechanism bring-up, called once the BB is ready.
 * The call order below matters: BB hardware setup first, then the
 * per-feature initializers, then the RFK/TX-power sequence - keep it.
 */
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_phy_nhm_setting_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}
7709 
/* Lightweight re-init: only the env monitor and PHY-status parsing are
 * refreshed here, a subset of rtw89_phy_dm_init().
 * NOTE(review): presumably used on paths where the rest of the DM state
 * survives (e.g. a partial restart) - confirm with callers.
 */
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
{
	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
}
7715 
7716 static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7717 {
7718 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
7719 	const struct rtw89_chip_info *chip = rtwdev->chip;
7720 	struct ieee80211_supported_band *sband;
7721 	enum rtw89_band hw_band;
7722 	enum nl80211_band band;
7723 	u8 idx;
7724 
7725 	if (!chip->support_noise)
7726 		return;
7727 
7728 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
7729 		sband = rtwdev->hw->wiphy->bands[band];
7730 		if (!sband)
7731 			continue;
7732 
7733 		hw_band = rtw89_nl80211_to_hw_band(band);
7734 		env->nhm_his[hw_band] =
7735 			devm_kcalloc(rtwdev->dev, sband->n_channels,
7736 				     sizeof(*env->nhm_his[0]), GFP_KERNEL);
7737 
7738 		for (idx = 0; idx < sband->n_channels; idx++)
7739 			INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list);
7740 
7741 		INIT_LIST_HEAD(&env->nhm_rpt_list);
7742 	}
7743 }
7744 
/* Allocate DM data (NHM history) on every BB the chip supports. */
void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_dm_init_data(rtwdev, bb);
}
7752 
/* Program the BSS color and station AID of @rtwvif_link into the BB.
 * Only applies to an associated HE link; otherwise the registers are
 * left untouched. The link's bss_conf is sampled under RCU.
 */
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
	struct ieee80211_bss_conf *bss_conf;
	u8 bss_color;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	if (!bss_conf->he_support || !vif->cfg.assoc) {
		rcu_read_unlock();
		return;
	}

	/* copy the color out before dropping the RCU read lock */
	bss_color = bss_conf->he_bss_color.color;

	rcu_read_unlock();

	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}
7782 
7783 static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
7784 {
7785 	return desc->ch != 0;
7786 }
7787 
7788 static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
7789 				   const struct rtw89_chan *chan)
7790 {
7791 	if (!rfk_chan_validate_desc(desc))
7792 		return false;
7793 
7794 	if (desc->ch != chan->channel)
7795 		return false;
7796 
7797 	if (desc->has_band && desc->band != chan->band_type)
7798 		return false;
7799 
7800 	if (desc->has_bw && desc->bw != chan->band_width)
7801 		return false;
7802 
7803 	return true;
7804 }
7805 
/* Cursor passed through rtw89_iterate_entity_chan() to count how many
 * active channels match one RFK table entry.
 */
struct rfk_chan_iter_data {
	const struct rtw89_rfk_chan_desc desc;	/* entry under test */
	unsigned int found;			/* matches among active chans */
};
7810 
7811 static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
7812 {
7813 	struct rfk_chan_iter_data *iter_data = data;
7814 
7815 	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
7816 		iter_data->found++;
7817 
7818 	return 0;
7819 }
7820 
7821 u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
7822 			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
7823 			 const struct rtw89_chan *target_chan)
7824 {
7825 	int sel = -1;
7826 	u8 i;
7827 
7828 	for (i = 0; i < desc_nr; i++) {
7829 		struct rfk_chan_iter_data iter_data = {
7830 			.desc = desc[i],
7831 		};
7832 
7833 		if (rfk_chan_is_equivalent(&desc[i], target_chan))
7834 			return i;
7835 
7836 		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
7837 		if (!iter_data.found && sel == -1)
7838 			sel = i;
7839 	}
7840 
7841 	if (sel == -1) {
7842 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
7843 			    "no idle rfk entry; force replace the first\n");
7844 		sel = 0;
7845 	}
7846 
7847 	return sel;
7848 }
7849 EXPORT_SYMBOL(rtw89_rfk_chan_lookup);
7850 
/* Thin adapters mapping one struct rtw89_reg5_def entry onto the
 * matching low-level accessor; dispatched via _rfk_handler[] below.
 */
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	/* def->data carries the delay in microseconds */
	udelay(def->data);
}

/* def->flag indexes this table; the static_assert keeps it in sync with
 * the RTW89_RFK_F_* enum.
 */
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
7891 
7892 void
7893 rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
7894 {
7895 	const struct rtw89_reg5_def *p = tbl->defs;
7896 	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
7897 
7898 	for (; p < end; p++)
7899 		_rfk_handler[p->flag](rtwdev, p);
7900 }
7901 EXPORT_SYMBOL(rtw89_rfk_parser);
7902 
#define RTW89_TSSI_FAST_MODE_NUM 4

/* {addr, byte-mask} targets written by
 * rtw89_phy_tssi_ctrl_set_fast_mode_cfg(); one table per bandedge
 * configuration (flat vs level).
 */
static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};
7918 
7919 static
7920 void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
7921 					   enum rtw89_mac_idx mac_idx,
7922 					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
7923 					   u32 val)
7924 {
7925 	const struct rtw89_reg_def *regs;
7926 	u32 reg;
7927 	int i;
7928 
7929 	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
7930 		regs = rtw89_tssi_fastmode_regs_flat;
7931 	else
7932 		regs = rtw89_tssi_fastmode_regs_level;
7933 
7934 	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
7935 		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
7936 		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
7937 	}
7938 }
7939 
/* {addr, byte-mask} targets for the RTW89_TSSI_SBW_NUM sub-bandwidth
 * entries programmed by rtw89_phy_tssi_ctrl_set_bandedge_cfg(); one
 * table per bandedge configuration (flat vs level).
 */
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};
7975 
7976 void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
7977 					  enum rtw89_mac_idx mac_idx,
7978 					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
7979 {
7980 	const struct rtw89_chip_info *chip = rtwdev->chip;
7981 	const struct rtw89_reg_def *regs;
7982 	const u32 *data;
7983 	u32 reg;
7984 	int i;
7985 
7986 	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
7987 		return;
7988 
7989 	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
7990 		regs = rtw89_tssi_bandedge_regs_flat;
7991 	else
7992 		regs = rtw89_tssi_bandedge_regs_level;
7993 
7994 	data = chip->tssi_dbw_table->data[bandedge_cfg];
7995 
7996 	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
7997 		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
7998 		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
7999 	}
8000 
8001 	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
8002 	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);
8003 
8004 	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
8005 					      data[RTW89_TSSI_SBW20]);
8006 }
8007 EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
8008 
/* Base channel of each channel-index group; 0xff marks unused slots.
 * Index 0 is 2 GHz, indices 2..5 are 5 GHz bases, 7..14 are 6 GHz bases
 * (see the *_IDX_* macros below). A chan_idx byte packs the group index
 * in its high nibble and a half-channel offset in its low nibble.
 */
static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G		0
#define RTW89_CH_BASE_IDX_5G_FIRST	2
#define RTW89_CH_BASE_IDX_5G_LAST	5
#define RTW89_CH_BASE_IDX_6G_FIRST	7
#define RTW89_CH_BASE_IDX_6G_LAST	14

#define RTW89_CH_BASE_IDX_MASK		GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK		GENMASK(3, 0)
8021 
8022 u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
8023 {
8024 	u8 chan_idx;
8025 	u8 last, first;
8026 	u8 idx;
8027 
8028 	switch (band) {
8029 	case RTW89_BAND_2G:
8030 		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
8031 			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
8032 		return chan_idx;
8033 	case RTW89_BAND_5G:
8034 		first = RTW89_CH_BASE_IDX_5G_FIRST;
8035 		last = RTW89_CH_BASE_IDX_5G_LAST;
8036 		break;
8037 	case RTW89_BAND_6G:
8038 		first = RTW89_CH_BASE_IDX_6G_FIRST;
8039 		last = RTW89_CH_BASE_IDX_6G_LAST;
8040 		break;
8041 	default:
8042 		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
8043 		return 0;
8044 	}
8045 
8046 	for (idx = last; idx >= first; idx--)
8047 		if (central_ch >= rtw89_ch_base_table[idx])
8048 			break;
8049 
8050 	if (idx < first) {
8051 		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
8052 		return 0;
8053 	}
8054 
8055 	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
8056 		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
8057 			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
8058 	return chan_idx;
8059 }
8060 EXPORT_SYMBOL(rtw89_encode_chan_idx);
8061 
8062 void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
8063 			   u8 *ch, enum nl80211_band *band)
8064 {
8065 	u8 idx, offset;
8066 
8067 	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
8068 	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);
8069 
8070 	if (idx == RTW89_CH_BASE_IDX_2G) {
8071 		*band = NL80211_BAND_2GHZ;
8072 		*ch = offset;
8073 		return;
8074 	}
8075 
8076 	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
8077 	*ch = rtw89_ch_base_table[idx] + (offset << 1);
8078 }
8079 EXPORT_SYMBOL(rtw89_decode_chan_idx);
8080 
8081 void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
8082 			    struct rtw89_bb_ctx *bb, bool scan)
8083 {
8084 	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
8085 	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
8086 
8087 	if (scan) {
8088 		edcca_bak->a =
8089 			rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
8090 					     edcca_regs->edcca_mask, bb->phy_idx);
8091 		edcca_bak->p =
8092 			rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
8093 					     edcca_regs->edcca_p_mask, bb->phy_idx);
8094 		edcca_bak->ppdu =
8095 			rtw89_phy_read32_idx(rtwdev, edcca_regs->ppdu_level,
8096 					     edcca_regs->ppdu_mask, bb->phy_idx);
8097 
8098 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8099 				      edcca_regs->edcca_mask, EDCCA_MAX, bb->phy_idx);
8100 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8101 				      edcca_regs->edcca_p_mask, EDCCA_MAX, bb->phy_idx);
8102 		rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
8103 				      edcca_regs->ppdu_mask, EDCCA_MAX, bb->phy_idx);
8104 	} else {
8105 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8106 				      edcca_regs->edcca_mask,
8107 				      edcca_bak->a, bb->phy_idx);
8108 		rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8109 				      edcca_regs->edcca_p_mask,
8110 				      edcca_bak->p, bb->phy_idx);
8111 		rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
8112 				      edcca_regs->ppdu_mask,
8113 				      edcca_bak->ppdu, bb->phy_idx);
8114 	}
8115 }
8116 
/* Dump EDCCA report registers for debugging. Only runs when the EDCCA
 * debug mask is enabled. The report-select writes steer which fields
 * the subsequent rpt_a/rpt_b reads return, so the write/read ordering
 * below must be preserved.
 */
static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	const struct rtw89_edcca_p_regs *edcca_p_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap = 0;
	u8 pwdb_sel = 5;
	u8 pwdb[8];
	u32 tmp;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	/* per-PHY report register bank */
	if (bb->phy_idx == RTW89_PHY_1)
		edcca_p_regs = &edcca_regs->p[RTW89_PHY_1];
	else
		edcca_p_regs = &edcca_regs->p[RTW89_PHY_0];

	rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
			       edcca_p_regs->rpt_sel_mask, 0);
	if (rtwdev->chip->chip_id == RTL8922A || rtwdev->chip->chip_id == RTL8922D) {
		/* BE-generation chips have an extra report selector */
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);
		per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
						     MASKBYTE0);
	}
	tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	/* 8922D exposes the s80/s40 power readback on a different page */
	if (rtwdev->chip->chip_id == RTL8922D)
		pwdb_sel = 2;

	rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
			       edcca_p_regs->rpt_sel_mask, pwdb_sel);
	tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	/* collect the eight per-20MHz power readings, two chips' layouts */
	if (rtwdev->chip->chip_id == RTL8922A || rtwdev->chip->chip_id == RTL8922D) {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}
8222 
8223 static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev,
8224 					   struct rtw89_bb_ctx *bb)
8225 {
8226 	struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
8227 	bool is_linked = rtwdev->total_sta_assoc > 0;
8228 	u8 rssi_min = ch_info->rssi_min >> 1;
8229 	u8 edcca_thre;
8230 
8231 	if (!is_linked) {
8232 		edcca_thre = EDCCA_MAX;
8233 	} else {
8234 		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
8235 			     EDCCA_TH_REF;
8236 		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
8237 	}
8238 
8239 	return edcca_thre;
8240 }
8241 
8242 void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
8243 {
8244 	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
8245 	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
8246 	u8 th;
8247 
8248 	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev, bb);
8249 	if (th == edcca_bak->th_old)
8250 		return;
8251 
8252 	edcca_bak->th_old = th;
8253 
8254 	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
8255 		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
8256 
8257 	rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8258 			      edcca_regs->edcca_mask, th, bb->phy_idx);
8259 	rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8260 			      edcca_regs->edcca_p_mask, th, bb->phy_idx);
8261 	rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
8262 			      edcca_regs->ppdu_mask, th, bb->phy_idx);
8263 }
8264 
/* Per-BB EDCCA tracking step: refresh the threshold, then dump debug
 * state when the EDCCA debug mask is enabled.
 */
static
void __rtw89_phy_edcca_track(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca track\n", bb->phy_idx);

	rtw89_phy_edcca_thre_calc(rtwdev, bb);
	rtw89_phy_edcca_log(rtwdev, bb);
}
8273 
8274 void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
8275 {
8276 	struct rtw89_hal *hal = &rtwdev->hal;
8277 	struct rtw89_bb_ctx *bb;
8278 
8279 	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
8280 		return;
8281 
8282 	rtw89_for_each_active_bb(rtwdev, bb)
8283 		__rtw89_phy_edcca_track(rtwdev, bb);
8284 }
8285 
8286 enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
8287 					   enum rtw89_phy_idx phy_idx)
8288 {
8289 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
8290 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
8291 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
8292 
8293 	switch (rtwdev->mlo_dbcc_mode) {
8294 	case MLO_1_PLUS_1_1RF:
8295 		if (phy_idx == RTW89_PHY_0)
8296 			return RF_A;
8297 		else
8298 			return RF_B;
8299 	case MLO_1_PLUS_1_2RF:
8300 		if (phy_idx == RTW89_PHY_0)
8301 			return RF_A;
8302 		else
8303 			return RF_D;
8304 	case MLO_0_PLUS_2_1RF:
8305 	case MLO_2_PLUS_0_1RF:
8306 		/* for both PHY 0/1 */
8307 		return RF_AB;
8308 	case MLO_0_PLUS_2_2RF:
8309 	case MLO_2_PLUS_0_2RF:
8310 	case MLO_2_PLUS_2_2RF:
8311 	default:
8312 		if (phy_idx == RTW89_PHY_0)
8313 			return RF_AB;
8314 		else
8315 			return RF_CD;
8316 	}
8317 }
8318 EXPORT_SYMBOL(rtw89_phy_get_kpath);
8319 
8320 enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
8321 					 enum rtw89_phy_idx phy_idx)
8322 {
8323 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
8324 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
8325 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
8326 
8327 	switch (rtwdev->mlo_dbcc_mode) {
8328 	case MLO_1_PLUS_1_1RF:
8329 		if (phy_idx == RTW89_PHY_0)
8330 			return RF_PATH_A;
8331 		else
8332 			return RF_PATH_B;
8333 	case MLO_1_PLUS_1_2RF:
8334 		if (phy_idx == RTW89_PHY_0)
8335 			return RF_PATH_A;
8336 		else
8337 			return RF_PATH_D;
8338 	case MLO_0_PLUS_2_1RF:
8339 	case MLO_2_PLUS_0_1RF:
8340 		if (phy_idx == RTW89_PHY_0)
8341 			return RF_PATH_A;
8342 		else
8343 			return RF_PATH_B;
8344 	case MLO_0_PLUS_2_2RF:
8345 	case MLO_2_PLUS_0_2RF:
8346 	case MLO_2_PLUS_2_2RF:
8347 	default:
8348 		if (phy_idx == RTW89_PHY_0)
8349 			return RF_PATH_A;
8350 		else
8351 			return RF_PATH_C;
8352 	}
8353 }
8354 EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
8355 
/* CCX / IFS-CLM / NHM register and field layout for AX-generation
 * chips, consumed through rtw89_phy_gen_ax.ccx.
 */
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_his_addr2 = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
	.nhm = R_NHM_AX,
	.nhm_ready = B_NHM_READY_MSK,
	.nhm_config = R_NHM_CFG,
	.nhm_period_mask = B_NHM_PERIOD_MSK,
	.nhm_unit_mask = B_NHM_COUNTER_MSK,
	.nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
	.nhm_en_mask = B_NHM_EN_MSK,
	.nhm_method = R_NHM_TH9,
	.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
8423 
/* PHY-status (PLCP histogram) register layout for AX-generation chips. */
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
8429 
/* CFO compensation register layout for AX-generation chips. */
static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};
8436 
/* PHY-generation definition for AX-family chips: register tables plus
 * the generation-specific ops. NULL entries are features this
 * generation does not provide.
 */
const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.physt_bmp_start = R_PHY_STS_BITMAP_ADDR_START,
	.physt_bmp_eht = 0xfc,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.bb_wrap = NULL,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);
8457