xref: /linux/drivers/net/wireless/mediatek/mt76/mac80211.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8 
/* Shorthand initializers for struct ieee80211_channel entries: fill in the
 * band, center frequency (MHz) and hardware channel index, with a default
 * regulatory max_power of 30 dBm.  All other fields stay zeroed.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

#define CHAN6G(_idx, _freq) {			\
	.band = NL80211_BAND_6GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
29 
/* Full 2.4 GHz channel template (channels 1-13 at 5 MHz spacing, plus
 * channel 14); regulatory pruning happens later per device. */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
46 
/* 5 GHz channel template; the blank-line groups correspond to the
 * regulatory sub-bands (labels as in the 6 GHz table below). */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* UNII-1 */
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	/* UNII-2 (DFS) */
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	/* UNII-2 Extended (DFS) */
	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	/* UNII-3 / UNII-4 */
	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
	CHAN5G(177, 5885),
};
80 
/* 6 GHz channel template: all 20 MHz channels (numbers 1-233 in steps of
 * 4, 20 MHz apart), annotated per UNII sub-band. */
static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};
146 
/* Blink intervals for the mac80211 throughput LED trigger: higher traffic
 * selects a shorter blink_time.  Per the mac80211 API, throughput is in
 * Kbps (so N * 1024 is roughly N Mbit/s) and blink_time in milliseconds.
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
159 
/* Shared legacy rate table: the four CCK rates first, then the eight OFDM
 * rates.  First macro argument is the hardware rate index, second the
 * bitrate -- presumably in units of 100 kbit/s as ieee80211_rate::bitrate
 * requires; confirm against CCK_RATE/OFDM_RATE in mt76.h.  Band helpers
 * below rely on the CCK entries being the first four (see "rates + 4").
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9,  240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8,  480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
175 
/* Frequency ranges advertised to userspace for SAR power limits; the
 * per-range limits themselves live in phy->frp, allocated in
 * mt76_phy_init() to match num_freq_ranges.
 * NOTE(review): the 5725-5950 and 5945-6165 entries overlap by 5 MHz --
 * looks like the 5 GHz band edge vs. the start of the 6 GHz band; confirm
 * this is intentional.
 */
static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

/* SAR capabilities published via wiphy->sar_capa in mt76_phy_init() */
static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};
195 
196 static int mt76_led_init(struct mt76_phy *phy)
197 {
198 	struct mt76_dev *dev = phy->dev;
199 	struct ieee80211_hw *hw = phy->hw;
200 	struct device_node *np = dev->dev->of_node;
201 
202 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
203 		return 0;
204 
205 	np = of_get_child_by_name(np, "led");
206 	if (np) {
207 		if (!of_device_is_available(np)) {
208 			of_node_put(np);
209 			dev_info(dev->dev,
210 				"led registration was explicitly disabled by dts\n");
211 			return 0;
212 		}
213 
214 		if (phy == &dev->phy) {
215 			int led_pin;
216 
217 			if (!of_property_read_u32(np, "led-sources", &led_pin))
218 				phy->leds.pin = led_pin;
219 
220 			phy->leds.al =
221 				of_property_read_bool(np, "led-active-low");
222 		}
223 
224 		of_node_put(np);
225 	}
226 
227 	snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
228 		 wiphy_name(hw->wiphy));
229 
230 	phy->leds.cdev.name = phy->leds.name;
231 	phy->leds.cdev.default_trigger =
232 		ieee80211_create_tpt_led_trigger(hw,
233 					IEEE80211_TPT_LEDTRIG_FL_RADIO,
234 					mt76_tpt_blink,
235 					ARRAY_SIZE(mt76_tpt_blink));
236 
237 	dev_info(dev->dev,
238 		"registering led '%s'\n", phy->leds.name);
239 
240 	return led_classdev_register(dev->dev, &phy->leds.cdev);
241 }
242 
243 static void mt76_led_cleanup(struct mt76_phy *phy)
244 {
245 	if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
246 		return;
247 
248 	led_classdev_unregister(&phy->leds.cdev);
249 }
250 
/* Update the stream-count dependent HT (and optionally VHT) capabilities
 * of @sband to match the number of chains in phy->antenna_mask: TX STBC
 * is only advertised with more than one chain and the RX MCS maps are
 * limited to the available spatial streams.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC requires at least two TX chains */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* HT: one rx_mask byte (8 MCS) per supported stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	/* VHT MCS map: two bits per NSS; MCS 0-9 for usable streams,
	 * "not supported" for the rest */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
292 
293 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
294 {
295 	if (phy->cap.has_2ghz)
296 		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
297 	if (phy->cap.has_5ghz)
298 		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
299 	if (phy->cap.has_6ghz)
300 		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
301 }
302 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
303 
/* Common setup for one supported band: duplicate the const channel
 * template into devm-managed memory (so per-device channel flags can be
 * modified later), allocate the parallel per-channel survey state, and
 * fill in the static HT/VHT capabilities when requested.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* Writable copy of the channel list, freed with the device */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	/* One mt76_channel_state entry per channel (survey accounting) */
	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Chain-count dependent bits (STBC, MCS maps) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
360 
361 static int
362 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
363 		   int n_rates)
364 {
365 	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
366 
367 	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
368 			       ARRAY_SIZE(mt76_channels_2ghz), rates,
369 			       n_rates, true, false);
370 }
371 
372 static int
373 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
374 		   int n_rates, bool vht)
375 {
376 	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
377 
378 	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
379 			       ARRAY_SIZE(mt76_channels_5ghz), rates,
380 			       n_rates, true, vht);
381 }
382 
383 static int
384 mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
385 		   int n_rates)
386 {
387 	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
388 
389 	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
390 			       ARRAY_SIZE(mt76_channels_6ghz), rates,
391 			       n_rates, false, false);
392 }
393 
394 static void
395 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
396 		 enum nl80211_band band)
397 {
398 	struct ieee80211_supported_band *sband = &msband->sband;
399 	bool found = false;
400 	int i;
401 
402 	if (!sband)
403 		return;
404 
405 	for (i = 0; i < sband->n_channels; i++) {
406 		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
407 			continue;
408 
409 		found = true;
410 		break;
411 	}
412 
413 	if (found) {
414 		cfg80211_chandef_create(&phy->chandef, &sband->channels[0],
415 					NL80211_CHAN_HT20);
416 		phy->chan_state = &msband->chan[0];
417 		phy->dev->band_phys[band] = phy;
418 		return;
419 	}
420 
421 	sband->n_channels = 0;
422 	if (phy->hw->wiphy->bands[band] == sband)
423 		phy->hw->wiphy->bands[band] = NULL;
424 }
425 
/* Shared one-time setup for mt76_register_device()/mt76_register_phy():
 * initialize the per-phy tx state and, for the phy that owns the
 * ieee80211_hw (hw->priv), advertise the common wiphy/hw features.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	INIT_LIST_HEAD(&phy->tx_list);
	spin_lock_init(&phy->tx_lock);
	INIT_DELAYED_WORK(&phy->roc_work, mt76_roc_complete_work);

	/* Secondary radio phys share the hw of the primary phy; only the
	 * owner configures the wiphy/hw below. */
	if ((void *)phy != hw->priv)
		return 0;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* Drivers may have set the antenna masks already; default to the
	 * phy's antenna mask otherwise. */
	if (!wiphy->available_antennas_tx)
		wiphy->available_antennas_tx = phy->antenna_mask;
	if (!wiphy->available_antennas_rx)
		wiphy->available_antennas_rx = phy->antenna_mask;

	/* Per-frequency-range SAR limits, sized to match mt76_sar_capa */
	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);

	/* Software A-MSDU aggregation, unless the driver offloads it */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
	    hw->max_tx_fragments > 1) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}
491 
492 struct mt76_phy *
493 mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
494 		     u8 band_idx)
495 {
496 	struct ieee80211_hw *hw = dev->phy.hw;
497 	unsigned int phy_size;
498 	struct mt76_phy *phy;
499 
500 	phy_size = ALIGN(sizeof(*phy), 8);
501 	phy = devm_kzalloc(dev->dev, size + phy_size, GFP_KERNEL);
502 	if (!phy)
503 		return NULL;
504 
505 	phy->dev = dev;
506 	phy->hw = hw;
507 	phy->priv = (void *)phy + phy_size;
508 	phy->band_idx = band_idx;
509 
510 	return phy;
511 }
512 EXPORT_SYMBOL_GPL(mt76_alloc_radio_phy);
513 
/* Allocate an extra phy with its own ieee80211_hw (multi-hw devices).
 * hw->priv holds the mt76_phy followed by @size bytes of driver private
 * data, 8-byte aligned.  The advertised interface modes mirror those set
 * in mt76_alloc_device().  Returns NULL on allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
547 
/* Register an extra phy allocated with mt76_alloc_phy() or
 * mt76_alloc_radio_phy().  @rates must start with the four CCK entries
 * (as mt76_rates does); "rates + 4" skips them so the 5/6 GHz bands only
 * advertise OFDM rates.  Returns 0 on success or a negative errno.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands without channels */
	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	/* Only the phy that owns the hw registers it with mac80211;
	 * radio phys piggyback on the owner's registration. */
	if ((void *)phy == phy->hw->priv) {
		ret = ieee80211_register_hw(phy->hw);
		if (ret)
			return ret;
	}

	set_bit(MT76_STATE_REGISTERED, &phy->state);
	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
598 
/* Tear down a phy registered via mt76_register_phy().  Harmless no-op if
 * registration never completed. */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(phy);
	/* Flush any pending tx status work before unregistering */
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
613 
/* Create a page pool for @q.  Only rx queues and the WED tx-free queue
 * get a pool; main rx queues get a larger one.  On MMIO devices the pool
 * also handles DMA mapping/sync and is tied to the queue's NAPI context.
 * Returns 0 on success (including the no-pool case) or a negative errno.
 */
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
	bool is_qrx = mt76_queue_is_rx(dev, q);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.nid = NUMA_NO_NODE,
		.dev = dev->dma_dev,
	};
	/* rx queue index, or -1 for non-rx queues */
	int idx = is_qrx ? q - dev->q_rx : -1;

	/* Allocate page_pools just for rx/wed_tx_free queues */
	if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
		return 0;

	switch (idx) {
	case MT_RXQ_MAIN:
	case MT_RXQ_BAND1:
	case MT_RXQ_BAND2:
	case MT_RXQ_NPU0:
	case MT_RXQ_NPU1:
		pp_params.pool_size = 256;
		break;
	default:
		pp_params.pool_size = 16;
		break;
	}

	if (mt76_is_mmio(dev)) {
		/* rely on page_pool for DMA mapping */
		pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.dma_dir = DMA_FROM_DEVICE;
		pp_params.max_len = PAGE_SIZE;
		pp_params.offset = 0;
		/* NAPI is available just for rx queues */
		if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
			pp_params.napi = &dev->napi[idx];
	}

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		/* Don't leave an ERR_PTR behind for later users */
		q->page_pool = NULL;
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_create_page_pool);
664 
/* Allocate the mt76_dev together with its ieee80211_hw and the driver's
 * private area (@size), and initialize all common locks, lists and work
 * items.  The embedded dev->phy becomes the primary (band 0) phy.
 * Returns NULL on allocation failure.
 */
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	/* By default DMA is done by the same device; drivers may override */
	dev->dma_dev = pdev;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	spin_lock_init(&dev->wed_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	/* Same interface-mode set as mt76_alloc_phy() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	spin_lock_init(&dev->rx_token_lock);
	idr_init(&dev->rx_token);

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	spin_lock_init(&dev->sta_poll_lock);

	INIT_LIST_HEAD(&dev->txwi_cache);
	INIT_LIST_HEAD(&dev->rxwi_cache);
	dev->token_size = dev->drv->token_size;
	INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);
	spin_lock_init(&dev->scan_lock);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
743 
/* Register the primary phy/hw with mac80211.  Like mt76_register_phy(),
 * @rates must start with the four CCK entries so "rates + 4" leaves only
 * the OFDM rates for the 5/6 GHz bands.  Also starts the tx worker.
 * Returns 0 on success or a negative errno.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_wcid_init(&dev->global_wcid, phy->band_idx);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	/* Apply DT frequency limits, then drop bands without channels */
	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(phy);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	set_bit(MT76_STATE_REGISTERED, &phy->state);
	/* tx scheduling runs at low realtime priority */
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
797 
/* Undo mt76_register_device().  No-op when registration never completed,
 * so it is safe to call from error paths. */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
		return;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(&dev->phy);
	/* Flush pending tx status and the global wcid before mac80211
	 * tears the interface down */
	mt76_tx_status_check(dev, true);
	mt76_wcid_cleanup(dev, &dev->global_wcid);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
812 
813 void mt76_free_device(struct mt76_dev *dev)
814 {
815 	mt76_worker_teardown(&dev->tx_worker);
816 	if (dev->wq) {
817 		destroy_workqueue(dev->wq);
818 		dev->wq = NULL;
819 	}
820 	mt76_npu_deinit(dev);
821 	ieee80211_free_hw(dev->hw);
822 }
823 EXPORT_SYMBOL_GPL(mt76_free_device);
824 
/* Clear the runtime state of one phy during full-device recovery.
 * Accepts NULL so unused entries of dev->phys[] can be passed directly. */
static void mt76_reset_phy(struct mt76_phy *phy)
{
	if (!phy)
		return;

	INIT_LIST_HEAD(&phy->tx_list);
	phy->num_sta = 0;
	phy->chanctx = NULL;
	/* Terminate any pending remain-on-channel request */
	mt76_roc_complete(phy);
}
835 
/* Drop all station state after a full hardware reset: detach and clean up
 * every wcid, reinitialize the shared lists/masks and reset the per-phy
 * runtime state. */
void mt76_reset_device(struct mt76_dev *dev)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[i]);
		if (!wcid)
			continue;

		/* Mark the entry as no longer backed by a station and
		 * release its pending frames before unpublishing it */
		wcid->sta = 0;
		mt76_wcid_cleanup(dev, wcid);
		rcu_assign_pointer(dev->wcid[i], NULL);
	}
	rcu_read_unlock();

	INIT_LIST_HEAD(&dev->wcid_list);
	INIT_LIST_HEAD(&dev->sta_poll_list);
	dev->vif_mask = 0;
	memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));

	/* dev->phy also sits in dev->phys[MT_BAND0] (see
	 * mt76_alloc_device()), so it may be reset twice; the operation
	 * is idempotent, so that is harmless. */
	mt76_reset_phy(&dev->phy);
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		mt76_reset_phy(dev->phys[i]);
}
EXPORT_SYMBOL_GPL(mt76_reset_device);
864 
865 struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
866 			      struct ieee80211_vif *vif)
867 {
868 	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
869 	struct mt76_chanctx *ctx;
870 
871 	if (!hw->wiphy->n_radio)
872 		return hw->priv;
873 
874 	if (!mlink->ctx)
875 		return NULL;
876 
877 	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
878 	return ctx->phy;
879 }
880 EXPORT_SYMBOL_GPL(mt76_vif_phy);
881 
/* Finish the A-MSDU currently being collected on @q: detach it from the
 * reassembly state, sanity-check its first subframe and either drop it or
 * queue it on dev->rx_skb[q] for further processing. */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			/* Skip the 802.11 header, and the IV if hardware
			 * decrypted the frame without stripping it */
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be: malformed, drop it */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
917 
/* Collect hardware-deaggregated A-MSDU subframes into a single skb via
 * its frag_list.  A subframe belonging to a different burst (different
 * sequence number, new first subframe, or not an A-MSDU at all) flushes
 * the pending one first; the last subframe completes the burst. */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* Flush a pending A-MSDU that @skb does not continue */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* Start a new burst; tail points at head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* Append the subframe to the current burst */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* Single MSDUs and final subframes complete the burst */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
940 
/* Driver entry point for received frames.  Frames are dropped while the
 * owning phy is not running; otherwise they pass through A-MSDU burst
 * reassembly and end up queued on dev->rx_skb[q] for later processing. */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	/* Testmode rx statistics: packet and FCS error counters */
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
962 
963 bool mt76_has_tx_pending(struct mt76_phy *phy)
964 {
965 	struct mt76_queue *q;
966 	int i;
967 
968 	for (i = 0; i < __MT_TXQ_MAX; i++) {
969 		q = phy->q_tx[i];
970 		if (q && q->queued)
971 			return true;
972 	}
973 
974 	if (atomic_read(&phy->mgmt_tx_pending))
975 		return true;
976 
977 	return false;
978 }
979 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
980 
981 static struct mt76_channel_state *
982 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
983 {
984 	struct mt76_sband *msband;
985 	int idx;
986 
987 	if (c->band == NL80211_BAND_2GHZ)
988 		msband = &phy->sband_2g;
989 	else if (c->band == NL80211_BAND_6GHZ)
990 		msband = &phy->sband_6g;
991 	else
992 		msband = &phy->sband_5g;
993 
994 	idx = c - &msband->sband.channels[0];
995 	return &msband->chan[idx];
996 }
997 
998 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
999 {
1000 	struct mt76_channel_state *state = phy->chan_state;
1001 
1002 	state->cc_active += ktime_to_us(ktime_sub(time,
1003 						  phy->survey_time));
1004 	phy->survey_time = time;
1005 }
1006 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
1007 
/* Refresh the channel-time counters for the current channel: let the
 * driver pull its hardware counters, account the elapsed active time and,
 * when rx airtime is tracked in software, fold the accumulated BSS rx
 * time into the channel state. */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		/* cc_lock guards cur_cc_bss_rx -- presumably against the
		 * rx airtime accounting path; confirm against the users
		 * of dev->cc_lock */
		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
1029 
/* Switch @phy to @chandef with the tx path quiesced; called with
 * dev->mutex held (see mt76_set_channel()).  The sequence matters: tx is
 * stopped and drained first, the old channel's survey data is captured,
 * then the channel state is updated before the driver callback programs
 * the hardware.  Returns the driver's set_channel() result. */
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int timeout = HZ / 5;
	int ret;

	/* Stop the tx scheduler and push out anything already scheduled */
	mt76_worker_disable(&dev->tx_worker);
	mt76_txq_schedule_pending(phy);

	set_bit(MT76_RESET, &phy->state);
	/* Wait up to 200ms for in-flight tx to drain */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	/* Close out the survey accounting for the old channel */
	mt76_update_survey(phy);

	/* Changing frequency or width invalidates the DFS state */
	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);
	phy->offchannel = offchannel;

	if (!offchannel)
		phy->main_chandef = *chandef;

	/* Start with fresh counters on any non-operating channel */
	if (chandef->chan != phy->main_chandef.chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));

	ret = dev->drv->set_channel(phy);

	clear_bit(MT76_RESET, &phy->state);
	mt76_worker_enable(&dev->tx_worker);
	mt76_worker_schedule(&dev->tx_worker);

	return ret;
}
1066 
/* Locked wrapper around __mt76_set_channel(): stop the periodic mac work
 * first, then serialize the switch with dev->mutex. */
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel)
{
	struct mt76_dev *dev = phy->dev;
	int ret;

	cancel_delayed_work_sync(&phy->mac_work);

	mutex_lock(&dev->mutex);
	ret = __mt76_set_channel(phy, chandef, offchannel);
	mutex_unlock(&dev->mutex);

	return ret;
}
1081 
1082 int mt76_update_channel(struct mt76_phy *phy)
1083 {
1084 	struct ieee80211_hw *hw = phy->hw;
1085 	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
1086 	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
1087 
1088 	phy->radar_enabled = hw->conf.radar_enabled;
1089 
1090 	return mt76_set_channel(phy, chandef, offchannel);
1091 }
1092 EXPORT_SYMBOL_GPL(mt76_update_channel);
1093 
1094 static struct mt76_sband *
1095 mt76_get_survey_sband(struct mt76_phy *phy, int *idx)
1096 {
1097 	if (*idx < phy->sband_2g.sband.n_channels)
1098 		return &phy->sband_2g;
1099 
1100 	*idx -= phy->sband_2g.sband.n_channels;
1101 	if (*idx < phy->sband_5g.sband.n_channels)
1102 		return &phy->sband_5g;
1103 
1104 	*idx -= phy->sband_5g.sband.n_channels;
1105 	if (*idx < phy->sband_6g.sband.n_channels)
1106 		return &phy->sband_6g;
1107 
1108 	*idx -= phy->sband_6g.sband.n_channels;
1109 	return NULL;
1110 }
1111 
/* cfg80211 survey callback: report the channel-time statistics for the
 * @idx-th channel across all phys belonging to @hw.  Returns 0 on success
 * or -ENOENT once @idx runs past the last channel. */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband = NULL;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int phy_idx = 0;
	int ret = 0;

	mutex_lock(&dev->mutex);

	/* Find the phy/band that contains the flat index; for multi-radio
	 * wiphys the index spans the channels of all phys in order. */
	for (phy_idx = 0; phy_idx < ARRAY_SIZE(dev->phys); phy_idx++) {
		sband = NULL;
		phy = dev->phys[phy_idx];
		if (!phy || phy->hw != hw)
			continue;

		sband = mt76_get_survey_sband(phy, &idx);

		/* Refresh hw counters once, on the first channel request */
		if (idx == 0 && phy->dev->drv->update_survey)
			mt76_update_survey(phy);

		if (sband || !hw->wiphy->n_radio)
			break;
	}

	if (!sband) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chandef.chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* Internal counters are in microseconds, survey wants ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
1178 
/* Initialize per-TID receive packet numbers for @wcid from @key so that
 * software PN replay detection (mt76_check_ccmp_pn) can run.
 * PN checking is only enabled for CCMP; any other cipher (or a NULL key)
 * leaves rx_check_pn disabled.
 */
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame: after the loop i == IEEE80211_NUM_TIDS,
	 * so the management PN lands in the extra slot of rx_key_pn
	 */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));

}
EXPORT_SYMBOL(mt76_wcid_key_setup);
1207 
1208 int mt76_rx_signal(u8 chain_mask, s8 *chain_signal)
1209 {
1210 	int signal = -128;
1211 	u8 chains;
1212 
1213 	for (chains = chain_mask; chains; chains >>= 1, chain_signal++) {
1214 		int cur, diff;
1215 
1216 		cur = *chain_signal;
1217 		if (!(chains & BIT(0)) ||
1218 		    cur > 0)
1219 			continue;
1220 
1221 		if (cur > signal)
1222 			swap(cur, signal);
1223 
1224 		diff = signal - cur;
1225 		if (diff == 0)
1226 			signal += 3;
1227 		else if (diff <= 2)
1228 			signal += 2;
1229 		else if (diff <= 6)
1230 			signal += 1;
1231 	}
1232 
1233 	return signal;
1234 }
1235 EXPORT_SYMBOL(mt76_rx_signal);
1236 
1237 static void
1238 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
1239 		struct ieee80211_hw **hw,
1240 		struct ieee80211_sta **sta)
1241 {
1242 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1243 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
1244 	struct mt76_rx_status mstat;
1245 
1246 	mstat = *((struct mt76_rx_status *)skb->cb);
1247 	memset(status, 0, sizeof(*status));
1248 
1249 	skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
1250 
1251 	status->flag = mstat.flag;
1252 	status->freq = mstat.freq;
1253 	status->enc_flags = mstat.enc_flags;
1254 	status->encoding = mstat.encoding;
1255 	status->bw = mstat.bw;
1256 	if (status->encoding == RX_ENC_EHT) {
1257 		status->eht.ru = mstat.eht.ru;
1258 		status->eht.gi = mstat.eht.gi;
1259 	} else {
1260 		status->he_ru = mstat.he_ru;
1261 		status->he_gi = mstat.he_gi;
1262 		status->he_dcm = mstat.he_dcm;
1263 	}
1264 	status->rate_idx = mstat.rate_idx;
1265 	status->nss = mstat.nss;
1266 	status->band = mstat.band;
1267 	status->signal = mstat.signal;
1268 	status->chains = mstat.chains;
1269 	status->ampdu_reference = mstat.ampdu_ref;
1270 	status->device_timestamp = mstat.timestamp;
1271 	status->mactime = mstat.timestamp;
1272 	status->signal = mt76_rx_signal(mstat.chains, mstat.chain_signal);
1273 	if (status->signal <= -128)
1274 		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1275 
1276 	if (ieee80211_is_beacon(hdr->frame_control) ||
1277 	    ieee80211_is_probe_resp(hdr->frame_control))
1278 		status->boottime_ns = ktime_get_boottime_ns();
1279 
1280 	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
1281 	BUILD_BUG_ON(sizeof(status->chain_signal) !=
1282 		     sizeof(mstat.chain_signal));
1283 	memcpy(status->chain_signal, mstat.chain_signal,
1284 	       sizeof(mstat.chain_signal));
1285 
1286 	if (mstat.wcid) {
1287 		status->link_valid = mstat.wcid->link_valid;
1288 		status->link_id = mstat.wcid->link_id;
1289 	}
1290 
1291 	*sta = wcid_to_sta(mstat.wcid);
1292 	*hw = mt76_phy_hw(dev, mstat.phy_idx);
1293 }
1294 
/* Software CCMP PN replay check for a decrypted rx frame.
 * Frames whose PN is not strictly greater than the last accepted PN for
 * the TID are demoted to monitor-only (RX_FLAG_ONLY_MONITOR) so mac80211
 * does not process them as data.
 */
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* big-endian byte-wise compare of the 6-byte PN: <= 0 means replay */
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	/* accepted: remember the new highest PN for this counter */
	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}
1352 
/* Estimate rx airtime for @len bytes at the rate described by @status,
 * account it against the channel busy counters and, if the frame belongs
 * to a known station, report it to mac80211's airtime fairness code.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	/* minimal rx status copy containing only rate-relevant fields */
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
1381 
/* Report the airtime of a fully-received A-MPDU batch and reset the
 * accumulation state. The wcid is re-looked-up by index since the
 * original pointer may have been freed while the batch was accumulating.
 * NOTE(review): rcu_dereference here assumes the caller runs in an RCU
 * read-side context (rx/NAPI path) - confirm.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
1403 
/* Software rx airtime accounting for one frame. Frames belonging to the
 * same A-MPDU (same ampdu_ref) are batched and reported as one unit by
 * mt76_airtime_flush_ampdu(); everything else is reported immediately.
 * Only active when the driver sets MT_DRV_SW_RX_AIRTIME.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		/* unknown sender: only count frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* flush the previous batch when a non-aggregated frame or a new
	 * A-MPDU arrives
	 */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* start a new batch; 0xff marks "no wcid" */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
1443 
/* Per-frame station bookkeeping on the rx path: resolve the wcid for
 * PS-poll frames, run airtime accounting, update RSSI/inactivity state,
 * and track the peer's power-save transitions for drivers that do
 * software PS handling (MT_WCID_FLAG_CHECK_PS).
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	/* PS-poll frames may arrive without a resolved wcid; look the
	 * station up by transmitter address
	 */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* rssi ewma stores the magnitude of the (negative) signal */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a meaningful PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no change in PS state - nothing to do */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	/* order matters: set the flag before notifying the driver on
	 * entry, clear it after notifying on exit
	 */
	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	if (dev->drv->sta_ps)
		dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}
1511 
/* Hand a queue of reordered rx frames to mac80211. A-MSDU subframes are
 * chained on the head skb's frag_list and are unlinked and delivered
 * individually. With a NAPI context the resulting list goes through GRO;
 * otherwise it is passed straight to the network stack.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* PN check only on the A-MSDU head frame */
		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1551 
/* Drain the pending rx skb queue for hardware queue @q: run per-station
 * bookkeeping, then either deliver directly (WED/NPU offload does the
 * reordering in hardware) or pass through software A-MPDU reordering
 * before completing rx towards mac80211.
 */
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		if (mtk_wed_device_active(&dev->mmio.wed) ||
		    mt76_npu_device_active(dev))
			__skb_queue_tail(&frames, skb);
		else
			mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1572 
/* Create the driver-side state for a new station: driver hook first,
 * then wire up the per-AC tx queues, init signal tracking and publish
 * the wcid for RCU lookups. Returns the driver hook's error on failure.
 */
static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	/* point each of the station's tx queues at its wcid index */
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
	phy->num_sta++;

	mt76_wcid_init(wcid, phy->band_idx);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1608 
/* Tear down a station's driver state. Caller must hold dev->mutex
 * (see mt76_sta_remove). Stops rx aggregation, invokes the driver's
 * removal hook, flushes pending tx/status state and releases the
 * wcid index.
 */
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_wcid_cleanup(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	phy->num_sta--;
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1628 
/* Locked wrapper around __mt76_sta_remove() */
static void
mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	__mt76_sta_remove(phy, vif, sta);
	mutex_unlock(&dev->mutex);
}
1639 
1640 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1641 		   struct ieee80211_sta *sta,
1642 		   enum ieee80211_sta_state old_state,
1643 		   enum ieee80211_sta_state new_state)
1644 {
1645 	struct mt76_phy *phy = hw->priv;
1646 	struct mt76_dev *dev = phy->dev;
1647 	enum mt76_sta_event ev;
1648 
1649 	phy = mt76_vif_phy(hw, vif);
1650 	if (!phy)
1651 		return -EINVAL;
1652 
1653 	if (old_state == IEEE80211_STA_NOTEXIST &&
1654 	    new_state == IEEE80211_STA_NONE)
1655 		return mt76_sta_add(phy, vif, sta);
1656 
1657 	if (old_state == IEEE80211_STA_NONE &&
1658 	    new_state == IEEE80211_STA_NOTEXIST)
1659 		mt76_sta_remove(phy, vif, sta);
1660 
1661 	if (!dev->drv->sta_event)
1662 		return 0;
1663 
1664 	if (old_state == IEEE80211_STA_AUTH &&
1665 	    new_state == IEEE80211_STA_ASSOC)
1666 		ev = MT76_STA_EVENT_ASSOC;
1667 	else if (old_state == IEEE80211_STA_ASSOC &&
1668 		 new_state == IEEE80211_STA_AUTHORIZED)
1669 		ev = MT76_STA_EVENT_AUTHORIZE;
1670 	else if (old_state == IEEE80211_STA_ASSOC &&
1671 		 new_state == IEEE80211_STA_AUTH)
1672 		ev = MT76_STA_EVENT_DISASSOC;
1673 	else
1674 		return 0;
1675 
1676 	return dev->drv->sta_event(dev, vif, sta, ev);
1677 }
1678 EXPORT_SYMBOL_GPL(mt76_sta_state);
1679 
/* mac80211 .sta_pre_rcu_remove callback: unpublish the wcid pointer so
 * no new RCU lookups can find the station before it is torn down. Taken
 * under status_lock so in-flight tx-status processing sees a consistent
 * view.
 */
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1694 
1695 void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx)
1696 {
1697 	wcid->hw_key_idx = -1;
1698 	wcid->phy_idx = band_idx;
1699 
1700 	INIT_LIST_HEAD(&wcid->tx_list);
1701 	skb_queue_head_init(&wcid->tx_pending);
1702 	skb_queue_head_init(&wcid->tx_offchannel);
1703 
1704 	INIT_LIST_HEAD(&wcid->list);
1705 	idr_init(&wcid->pktid);
1706 
1707 	INIT_LIST_HEAD(&wcid->poll_list);
1708 }
1709 EXPORT_SYMBOL_GPL(mt76_wcid_init);
1710 
/* Release all per-wcid tx state: collect frames awaiting tx status,
 * destroy the packet-id idr, unlink the wcid from the poll and tx lists
 * and free every pending skb.
 */
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
	struct ieee80211_hw *hw;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* pull all frames waiting for tx status into the local list */
	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);

	/* Remove from sta_poll_list to prevent list corruption after reset.
	 * Without this, mt76_reset_device() reinitializes sta_poll_list but
	 * leaves wcid->poll_list with stale pointers, causing list corruption
	 * when mt76_wcid_add_poll() checks list_empty().
	 */
	spin_lock_bh(&dev->sta_poll_lock);
	if (!list_empty(&wcid->poll_list))
		list_del_init(&wcid->poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	spin_lock_bh(&phy->tx_lock);

	if (!list_empty(&wcid->tx_list))
		list_del_init(&wcid->tx_list);

	/* splice any not-yet-queued frames into the same free list */
	spin_lock(&wcid->tx_pending.lock);
	skb_queue_splice_tail_init(&wcid->tx_pending, &list);
	spin_unlock(&wcid->tx_pending.lock);

	spin_lock(&wcid->tx_offchannel.lock);
	skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
	spin_unlock(&wcid->tx_offchannel.lock);

	spin_unlock_bh(&phy->tx_lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
1755 
/* Queue @wcid for stats polling unless it is already queued, the device
 * is in MCU reset, or the entry does not represent an actual station.
 */
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
		return;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&wcid->poll_list))
		list_add_tail(&wcid->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
1767 
/* Convert @txpower (dB) to the internal 0.5 dB scale, apply the SAR
 * limit for the current channel and subtract the multi-chain power
 * delta. Returns the bounded per-path power in 0.5 dB units.
 */
s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
{
	int n_chains = hweight16(phy->chainmask);

	/* *2: dB -> 0.5 dB units, the scale mt76_get_sar_power works in */
	txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
	txpower -= mt76_tx_power_path_delta(n_chains);

	return txpower;
}
EXPORT_SYMBOL_GPL(mt76_get_power_bound);
1778 
/* mac80211 .get_txpower callback: report the total tx power in dBm for
 * the vif's phy, adding back the per-chain delta and converting from the
 * internal 0.5 dB units.
 */
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     unsigned int link_id, int *dbm)
{
	struct mt76_phy *phy = mt76_vif_phy(hw, vif);
	int n_chains, delta;

	if (!phy)
		return -EINVAL;

	n_chains = hweight16(phy->chainmask);
	delta = mt76_tx_power_path_delta(n_chains);
	/* txpower_cur is in 0.5 dB units; round up to whole dBm */
	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);
1795 
/* Store user-supplied SAR power limits (from NL80211_CMD_SET_SAR_SPECS)
 * in the phy's per-frequency-range table. Powers are converted from the
 * 0.25 dBm units of the netlink API to the 0.5 dB units used internally.
 * NOTE(review): freq_range_index is used to index phy->frp without a
 * local bounds check - presumably cfg80211 validates it against
 * sar_capa before calling the driver; confirm.
 */
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies power limitation in 0.25dbm */
		s32 power = sar->sub_specs[i].power >> 1;

		/* out-of-range values mean "no limit" (max power) */
		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);
1821 
1822 int mt76_get_sar_power(struct mt76_phy *phy,
1823 		       struct ieee80211_channel *chan,
1824 		       int power)
1825 {
1826 	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
1827 	int freq, i;
1828 
1829 	if (!capa || !phy->frp)
1830 		return power;
1831 
1832 	if (power > 127 || power < -127)
1833 		power = 127;
1834 
1835 	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
1836 	for (i = 0 ; i < capa->num_freq_ranges; i++) {
1837 		if (phy->frp[i].range &&
1838 		    freq >= phy->frp[i].range->start_freq &&
1839 		    freq < phy->frp[i].range->end_freq) {
1840 			power = min_t(int, phy->frp[i].power, power);
1841 			break;
1842 		}
1843 	}
1844 
1845 	return power;
1846 }
1847 EXPORT_SYMBOL_GPL(mt76_get_sar_power);
1848 
/* Interface iterator: complete the channel-switch announcement on every
 * vif whose CSA countdown has expired.
 */
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
		ieee80211_csa_finish(vif, 0);
}
1855 
/* Finalize a pending channel switch (set by mt76_csa_check) across all
 * active interfaces and clear the completion flag.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1868 
/* Interface iterator: record in dev->csa_complete whether any vif's CSA
 * beacon countdown has reached zero.
 */
static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
1879 
/* Poll all active interfaces for a completed CSA countdown; the result
 * is latched in dev->csa_complete for mt76_csa_finish() to act on.
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1887 
/* mac80211 .set_tim callback stub: the TIM bitmap is rebuilt when the
 * beacon is fetched, so there is nothing to do here. Always succeeds.
 * NOTE(review): the "rebuilt on beacon fetch" rationale is inferred -
 * confirm against the beacon update path.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1894 
/* Reinsert an 8-byte CCMP header (stripped by hardware) between the
 * 802.11 header and the payload, rebuilding it from the PN saved in
 * status->iv, and clear RX_FLAG_IV_STRIPPED accordingly.
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* open an 8-byte gap after the 802.11 header */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header: PN0, PN1, rsvd, key-id/ext-IV, PN2..PN5 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1917 
1918 int mt76_get_rate(struct mt76_dev *dev,
1919 		  struct ieee80211_supported_band *sband,
1920 		  int idx, bool cck)
1921 {
1922 	bool is_2g = sband->band == NL80211_BAND_2GHZ;
1923 	int i, offset = 0, len = sband->n_bitrates;
1924 
1925 	if (cck) {
1926 		if (!is_2g)
1927 			return 0;
1928 
1929 		idx &= ~BIT(2); /* short preamble */
1930 	} else if (is_2g) {
1931 		offset = 4;
1932 	}
1933 
1934 	for (i = offset; i < len; i++) {
1935 		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1936 			return i;
1937 	}
1938 
1939 	return 0;
1940 }
1941 EXPORT_SYMBOL_GPL(mt76_get_rate);
1942 
/* mac80211 .sw_scan_start callback: flag the phy as scanning */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1951 
/* mac80211 .sw_scan_complete callback: clear the scanning flag */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1959 
/* mac80211 .get_antenna callback: report the union of the chainmasks of
 * every phy bound to @hw; rx and tx masks are identical.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
		     u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	int i;

	mutex_lock(&dev->mutex);
	*tx_ant = 0;
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
		if (dev->phys[i] && dev->phys[i]->hw == hw)
			*tx_ant |= dev->phys[i]->chainmask;
	*rx_ant = *tx_ant;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1978 
/* Allocate and initialize a hardware queue via the bus-specific
 * queue_ops->alloc hook. The queue structure itself is device-managed
 * (devm), so it is freed automatically on driver detach. Returns the
 * queue or an ERR_PTR on failure.
 */
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	hwq->flags = flags;
	hwq->wed = wed;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
2000 
/* Per-station ethtool stats iterator: accumulate @stats into the shared
 * data array starting at wi->initial_stat_idx. The slot order (tx modes,
 * then bandwidths, MCS, NSS) must match the driver's stats string table.
 * @eht selects the wider layout used by EHT-capable chips.
 */
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
	if (eht) {
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
		data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
	}

	/* the last tx_bw slot is EHT-only */
	for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
		data[ei++] += stats->tx_bw[i];

	/* 12 MCS entries for HE, 14 for EHT */
	for (i = 0; i < (eht ? 14 : 12); i++)
		data[ei++] += stats->tx_mcs[i];

	for (i = 0; i < 4; i++)
		data[ei++] += stats->tx_nss[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
2036 
/* Append aggregated page-pool statistics for all rx queues to the
 * ethtool @data array and advance *index accordingly. Compiles to a
 * no-op when CONFIG_PAGE_POOL_STATS is disabled.
 */
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	int i;

	mt76_for_each_q_rx(dev, i)
		page_pool_get_stats(dev->q_rx[i].page_pool, &stats);

	page_pool_ethtool_stats_get(data, &stats);
	*index += page_pool_ethtool_stats_get_count();
#endif
}
EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
2051 
/* Determine the DFS state for the phy's current channel:
 * - DISABLED while scanning, with no regulatory DFS region, or when
 *   radar detection is off (unless monitoring a radar channel),
 * - CAC while beaconing is not yet permitted on the channel,
 * - ACTIVE once the channel is cleared for use.
 */
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!phy->radar_enabled) {
		/* monitor mode on a radar channel still needs detection */
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
2075 
/* Tear down vif-wide state on interface removal: unpublish the default
 * link and abort any scan or remain-on-channel operation still running.
 */
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	rcu_assign_pointer(mvif->link[0], NULL);
	mt76_abort_scan(dev);
	if (mvif->roc_phy)
		mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
2087 
/* Choose which MLO links to activate, up to @max_active_links.
 * Starting from the first usable link that is already active, links on
 * *different* bands are added until the limit is reached, maximizing
 * band diversity. Returns 0 for non-MLD vifs, the current active set if
 * all usable links are already active, or the selected link bitmap.
 */
u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
{
	unsigned long usable_links = ieee80211_vif_usable_links(vif);
	struct  {
		u8 link_id;
		enum nl80211_band band;
	} data[IEEE80211_MLD_MAX_NUM_LINKS];
	unsigned int link_id;
	int i, n_data = 0;
	u16 sel_links = 0;

	if (!ieee80211_vif_is_mld(vif))
		return 0;

	if (vif->active_links == usable_links)
		return vif->active_links;

	/* snapshot link id and band for every usable link */
	rcu_read_lock();
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf;

		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (WARN_ON_ONCE(!link_conf))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].band = link_conf->chanreq.oper.chan->band;
		n_data++;
	}
	rcu_read_unlock();

	for (i = 0; i < n_data; i++) {
		int j;

		/* anchor the selection on an already-active link */
		if (!(BIT(data[i].link_id) & vif->active_links))
			continue;

		sel_links = BIT(data[i].link_id);
		for (j = 0; j < n_data; j++) {
			if (data[i].band != data[j].band) {
				sel_links |= BIT(data[j].link_id);
				if (hweight16(sel_links) == max_active_links)
					break;
			}
		}
		break;
	}

	return sel_links;
}
EXPORT_SYMBOL_GPL(mt76_select_links);
2139 
/* Context passed to mt76_offchannel_notify_iter() */
struct mt76_offchannel_cb_data {
	struct mt76_phy *phy;		/* phy performing the channel switch */
	bool offchannel;		/* true when leaving the operating channel */
};
2144 
/* Send a nullfunc frame on @vif (link @link_id, or -1 for non-MLD) to
 * inform the AP about our power-save state around an off-channel
 * switch: PM is set when going off-channel, cleared when returning.
 */
static void
mt76_offchannel_send_nullfunc(struct mt76_offchannel_cb_data *data,
			      struct ieee80211_vif *vif, int link_id)
{
	struct mt76_phy *phy = data->phy;
	struct ieee80211_tx_info *info;
	struct ieee80211_sta *sta = NULL;
	struct ieee80211_hdr *hdr;
	struct mt76_wcid *wcid;
	struct sk_buff *skb;

	skb = ieee80211_nullfunc_get(phy->hw, vif, link_id, true);
	if (!skb)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (data->offchannel)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);

	/* highest priority so the frame goes out before the switch */
	skb->priority = 7;
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);

	/* NOTE(review): relies on ieee80211_tx_prepare_skb() consuming
	 * the skb on failure - confirm against mac80211
	 */
	if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb,
				      phy->main_chandef.chan->band,
				      &sta))
		return;

	if (sta)
		wcid = (struct mt76_wcid *)sta->drv_priv;
	else
		wcid = ((struct mt76_vif_link *)vif->drv_priv)->wcid;

	if (link_id >= 0) {
		info = IEEE80211_SKB_CB(skb);
		info->control.flags &= ~IEEE80211_TX_CTRL_MLO_LINK;
		info->control.flags |=
			u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK);
	}

	mt76_tx(phy, sta, wcid, skb);
}
2186 
/* Interface iterator for mt76_offchannel_notify(): for every associated
 * station vif (or each MLD link) that lives on the switching phy, send a
 * nullfunc and, on return to the operating channel, restart the beacon
 * monitor timestamp.
 */
static void
mt76_offchannel_notify_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_offchannel_cb_data *data = _data;
	struct mt76_vif_link *mlink;
	struct mt76_vif_data *mvif;
	int link_id;

	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
		return;

	mlink = (struct mt76_vif_link *)vif->drv_priv;
	mvif = mlink->mvif;

	if (!ieee80211_vif_is_mld(vif)) {
		if (mt76_vif_link_phy(mlink) == data->phy) {
			/* back on channel: reset beacon-loss tracking */
			if (!data->offchannel && mlink->beacon_mon_interval)
				WRITE_ONCE(mlink->beacon_mon_last, jiffies);
			mt76_offchannel_send_nullfunc(data, vif, -1);
		}
		return;
	}

	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
		/* the default link lives in vif->drv_priv, not mvif->link[] */
		if (link_id == mvif->deflink_id)
			mlink = (struct mt76_vif_link *)vif->drv_priv;
		else
			mlink = rcu_dereference(mvif->link[link_id]);
		if (!mlink)
			continue;
		if (mt76_vif_link_phy(mlink) != data->phy)
			continue;

		if (!data->offchannel && mlink->beacon_mon_interval)
			WRITE_ONCE(mlink->beacon_mon_last, jiffies);

		mt76_offchannel_send_nullfunc(data, vif, link_id);
	}
}
2226 
2227 void mt76_offchannel_notify(struct mt76_phy *phy, bool offchannel)
2228 {
2229 	struct mt76_offchannel_cb_data data = {
2230 		.phy = phy,
2231 		.offchannel = offchannel,
2232 	};
2233 
2234 	if (!phy->num_sta)
2235 		return;
2236 
2237 	local_bh_disable();
2238 	ieee80211_iterate_active_interfaces_atomic(phy->hw,
2239 		IEEE80211_IFACE_ITER_NORMAL,
2240 		mt76_offchannel_notify_iter, &data);
2241 	local_bh_enable();
2242 }
2243 EXPORT_SYMBOL_GPL(mt76_offchannel_notify);
2244 
/* Context handed to mt76_rx_beacon_iter() through the interface iterator */
struct mt76_rx_beacon_data {
	struct mt76_phy *phy;	/* phy the beacon was received on */
	const u8 *bssid;	/* addr3 (BSSID) of the received beacon */
};
2249 
/* Per-interface callback for mt76_rx_beacon(): refresh the beacon-monitor
 * timestamp of every link on the receiving phy whose BSSID matches the
 * beacon's. Runs under RCU via the atomic interface iterator, which makes
 * the rcu_dereference() calls below safe.
 */
static void mt76_rx_beacon_iter(void *_data, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct mt76_rx_beacon_data *data = _data;
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;
	int link_id;

	/* Only associated station interfaces track beacons */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
		return;

	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
		struct ieee80211_bss_conf *link_conf;

		/* The default link lives in drv_priv, not in the RCU table */
		if (link_id == mvif->deflink_id)
			mlink = (struct mt76_vif_link *)vif->drv_priv;
		else
			mlink = rcu_dereference(mvif->link[link_id]);
		/* Skip missing links and links without beacon monitoring */
		if (!mlink || !mlink->beacon_mon_interval)
			continue;

		if (mt76_vif_link_phy(mlink) != data->phy)
			continue;

		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (!link_conf)
			continue;

		/* Accept a direct BSSID match, or — for a nontransmitted
		 * (multiple-BSSID) profile — a match on the transmitter BSSID
		 */
		if (!ether_addr_equal(link_conf->bssid, data->bssid) &&
		    (!link_conf->nontransmitted ||
		     !ether_addr_equal(link_conf->transmitter_bssid,
				       data->bssid)))
			continue;

		/* Beacon seen: push the loss deadline forward */
		WRITE_ONCE(mlink->beacon_mon_last, jiffies);
	}
}
2287 
2288 void mt76_rx_beacon(struct mt76_phy *phy, struct sk_buff *skb)
2289 {
2290 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
2291 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
2292 	struct mt76_rx_beacon_data data = {
2293 		.phy = phy,
2294 		.bssid = hdr->addr3,
2295 	};
2296 
2297 	mt76_scan_rx_beacon(phy->dev, phy->chandef.chan);
2298 
2299 	if (!phy->num_sta)
2300 		return;
2301 
2302 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_ONLY_MONITOR))
2303 		return;
2304 
2305 	ieee80211_iterate_active_interfaces_atomic(phy->hw,
2306 		IEEE80211_IFACE_ITER_RESUME_ALL,
2307 		mt76_rx_beacon_iter, &data);
2308 }
2309 EXPORT_SYMBOL_GPL(mt76_rx_beacon);
2310 
/* Per-interface callback for mt76_beacon_mon_check(): report beacon loss
 * to mac80211 for every monitored link on this phy whose last-seen-beacon
 * timestamp is too old. Runs under RCU via the atomic interface iterator,
 * which makes the rcu_dereference() below safe.
 */
static void mt76_beacon_mon_iter(void *data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = data;
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;
	int link_id;

	/* Only associated station interfaces monitor beacons */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
		return;

	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
		/* The default link lives in drv_priv, not in the RCU table */
		if (link_id == mvif->deflink_id)
			mlink = (struct mt76_vif_link *)vif->drv_priv;
		else
			mlink = rcu_dereference(mvif->link[link_id]);
		/* Skip missing links and links without beacon monitoring */
		if (!mlink || !mlink->beacon_mon_interval)
			continue;

		if (mt76_vif_link_phy(mlink) != phy)
			continue;

		/* Declare loss after MT76_BEACON_MON_MAX_MISS consecutive
		 * beacon intervals without a matching beacon; mlink's
		 * beacon_mon_last is refreshed from mt76_rx_beacon_iter()
		 * (paired WRITE_ONCE/READ_ONCE, hence no locking here).
		 * NOTE(review): may fire once per lost link of the same vif.
		 */
		if (time_after(jiffies,
			       READ_ONCE(mlink->beacon_mon_last) +
			       MT76_BEACON_MON_MAX_MISS * mlink->beacon_mon_interval))
			ieee80211_beacon_loss(vif);
	}
}
2339 
2340 void mt76_beacon_mon_check(struct mt76_phy *phy)
2341 {
2342 	if (phy->offchannel)
2343 		return;
2344 
2345 	ieee80211_iterate_active_interfaces_atomic(phy->hw,
2346 		IEEE80211_IFACE_ITER_RESUME_ALL,
2347 		mt76_beacon_mon_iter, phy);
2348 }
2349 EXPORT_SYMBOL_GPL(mt76_beacon_mon_check);
2350